1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */ 3 #include <linux/memregion.h> 4 #include <linux/genalloc.h> 5 #include <linux/device.h> 6 #include <linux/module.h> 7 #include <linux/slab.h> 8 #include <linux/uuid.h> 9 #include <linux/sort.h> 10 #include <linux/idr.h> 11 #include <cxlmem.h> 12 #include <cxl.h> 13 #include "core.h" 14 15 /** 16 * DOC: cxl core region 17 * 18 * CXL Regions represent mapped memory capacity in system physical address 19 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL 20 * Memory ranges, Regions represent the active mapped capacity by the HDM 21 * Decoder Capability structures throughout the Host Bridges, Switches, and 22 * Endpoints in the topology. 23 * 24 * Region configuration has ordering constraints. UUID may be set at any time 25 * but is only visible for persistent regions. 26 * 1. Interleave granularity 27 * 2. Interleave size 28 * 3. Decoder targets 29 */ 30 31 static struct cxl_region *to_cxl_region(struct device *dev); 32 33 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 34 char *buf) 35 { 36 struct cxl_region *cxlr = to_cxl_region(dev); 37 struct cxl_region_params *p = &cxlr->params; 38 ssize_t rc; 39 40 rc = down_read_interruptible(&cxl_region_rwsem); 41 if (rc) 42 return rc; 43 if (cxlr->mode != CXL_DECODER_PMEM) 44 rc = sysfs_emit(buf, "\n"); 45 else 46 rc = sysfs_emit(buf, "%pUb\n", &p->uuid); 47 up_read(&cxl_region_rwsem); 48 49 return rc; 50 } 51 52 static int is_dup(struct device *match, void *data) 53 { 54 struct cxl_region_params *p; 55 struct cxl_region *cxlr; 56 uuid_t *uuid = data; 57 58 if (!is_cxl_region(match)) 59 return 0; 60 61 lockdep_assert_held(&cxl_region_rwsem); 62 cxlr = to_cxl_region(match); 63 p = &cxlr->params; 64 65 if (uuid_equal(&p->uuid, uuid)) { 66 dev_dbg(match, "already has uuid: %pUb\n", uuid); 67 return -EBUSY; 68 } 69 70 return 0; 71 } 72 73 static ssize_t uuid_store(struct device *dev, struct device_attribute *attr, 74 const char *buf, size_t len) 75 { 76 struct cxl_region *cxlr = to_cxl_region(dev); 77 struct cxl_region_params *p = &cxlr->params; 78 uuid_t temp; 79 ssize_t rc; 80 81 if (len != UUID_STRING_LEN + 1) 82 return -EINVAL; 83 84 rc = uuid_parse(buf, &temp); 85 if (rc) 86 return rc; 87 88 if (uuid_is_null(&temp)) 89 return -EINVAL; 90 91 rc = down_write_killable(&cxl_region_rwsem); 92 if (rc) 93 return rc; 94 95 if (uuid_equal(&p->uuid, &temp)) 96 goto out; 97 98 rc = -EBUSY; 99 if (p->state >= CXL_CONFIG_ACTIVE) 100 goto out; 101 102 rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup); 103 if (rc < 0) 104 goto out; 105 106 uuid_copy(&p->uuid, &temp); 107 out: 108 up_write(&cxl_region_rwsem); 109 110 if (rc) 111 return rc; 112 return len; 113 } 114 static DEVICE_ATTR_RW(uuid); 115 116 static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port, 117 struct cxl_region *cxlr) 118 { 119 return xa_load(&port->regions, (unsigned long)cxlr); 120 } 121 122 static int cxl_region_invalidate_memregion(struct cxl_region *cxlr) 123 { 124 if (!cpu_cache_has_invalidate_memregion()) { 125 if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) { 126 dev_warn_once( 127 &cxlr->dev, 128 "Bypassing cpu_cache_invalidate_memregion() for testing!\n"); 129 return 0; 130 } else { 131 dev_err(&cxlr->dev, 132 "Failed to synchronize CPU cache state\n"); 133 return -ENXIO; 134 } 135 } 136 137 cpu_cache_invalidate_memregion(IORES_DESC_CXL); 138 return 0; 139 } 140 141 static int cxl_region_decode_reset(struct cxl_region *cxlr, int count) 142 { 143 struct cxl_region_params *p = &cxlr->params; 144 int i, rc = 0; 145 146 /* 147 * Before region teardown attempt to flush, and if the flush 148 * fails cancel the region teardown for data consistency 149 * concerns 150 */ 151 rc = cxl_region_invalidate_memregion(cxlr); 152 if (rc) 153 return rc; 154 155 for (i = count - 1; i >= 0; i--) { 156 struct cxl_endpoint_decoder *cxled = p->targets[i]; 157 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 158 struct cxl_port *iter = cxled_to_port(cxled); 159 struct cxl_dev_state *cxlds = cxlmd->cxlds; 160 struct cxl_ep *ep; 161 162 if (cxlds->rcd) 163 goto endpoint_reset; 164 165 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) 166 iter = to_cxl_port(iter->dev.parent); 167 168 for (ep = cxl_ep_load(iter, cxlmd); iter; 169 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { 170 struct cxl_region_ref *cxl_rr; 171 struct cxl_decoder *cxld; 172 173 cxl_rr = cxl_rr_load(iter, cxlr); 174 cxld = cxl_rr->decoder; 175 if (cxld->reset) 176 rc = cxld->reset(cxld); 177 if (rc) 178 return rc; 179 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); 180 } 181 182 endpoint_reset: 183 rc = cxled->cxld.reset(&cxled->cxld); 184 if (rc) 185 return rc; 186 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); 187 } 188 189 /* all decoders associated with this region have been torn down */ 190 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); 191 192 return 0; 193 } 194 195 static int commit_decoder(struct cxl_decoder *cxld) 196 { 197 struct cxl_switch_decoder *cxlsd = NULL; 198 199 if (cxld->commit) 200 return cxld->commit(cxld); 201 202 if (is_switch_decoder(&cxld->dev)) 203 cxlsd = to_cxl_switch_decoder(&cxld->dev); 204 205 if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1, 206 "->commit() is required\n")) 207 return -ENXIO; 208 return 0; 209 } 210 211 static int cxl_region_decode_commit(struct cxl_region *cxlr) 212 { 213 struct cxl_region_params *p = &cxlr->params; 214 int i, rc = 0; 215 216 for (i = 0; i < p->nr_targets; i++) { 217 struct cxl_endpoint_decoder *cxled = p->targets[i]; 218 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 219 struct cxl_region_ref *cxl_rr; 220 struct cxl_decoder *cxld; 221 struct cxl_port *iter; 222 struct cxl_ep *ep; 223 224 /* commit bottom up */ 225 for (iter = cxled_to_port(cxled); !is_cxl_root(iter); 226 iter = to_cxl_port(iter->dev.parent)) { 227 cxl_rr = cxl_rr_load(iter, cxlr); 228 cxld = cxl_rr->decoder; 229 rc = commit_decoder(cxld); 230 if (rc) 231 break; 232 } 233 234 if (rc) { 235 /* programming @iter failed, teardown */ 236 for (ep = cxl_ep_load(iter, cxlmd); ep && iter; 237 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { 238 cxl_rr = cxl_rr_load(iter, cxlr); 239 cxld = cxl_rr->decoder; 240 if (cxld->reset) 241 cxld->reset(cxld); 242 } 243 244 cxled->cxld.reset(&cxled->cxld); 245 goto err; 246 } 247 } 248 249 return 0; 250 251 err: 252 /* undo the targets that were successfully committed */ 253 cxl_region_decode_reset(cxlr, i); 254 return rc; 255 } 256 257 static ssize_t commit_store(struct device *dev, struct device_attribute *attr, 258 const char *buf, size_t len) 259 { 260 struct cxl_region *cxlr = to_cxl_region(dev); 261 struct cxl_region_params *p = &cxlr->params; 262 bool commit; 263 ssize_t rc; 264 265 rc = kstrtobool(buf, &commit); 266 if (rc) 267 return rc; 268 269 rc = down_write_killable(&cxl_region_rwsem); 270 if (rc) 271 return rc; 272 273 /* Already in the requested state? */ 274 if (commit && p->state >= CXL_CONFIG_COMMIT) 275 goto out; 276 if (!commit && p->state < CXL_CONFIG_COMMIT) 277 goto out; 278 279 /* Not ready to commit? */ 280 if (commit && p->state < CXL_CONFIG_ACTIVE) { 281 rc = -ENXIO; 282 goto out; 283 } 284 285 /* 286 * Invalidate caches before region setup to drop any speculative 287 * consumption of this address space 288 */ 289 rc = cxl_region_invalidate_memregion(cxlr); 290 if (rc) 291 goto out; 292 293 if (commit) { 294 rc = cxl_region_decode_commit(cxlr); 295 if (rc == 0) 296 p->state = CXL_CONFIG_COMMIT; 297 } else { 298 p->state = CXL_CONFIG_RESET_PENDING; 299 up_write(&cxl_region_rwsem); 300 device_release_driver(&cxlr->dev); 301 down_write(&cxl_region_rwsem); 302 303 /* 304 * The lock was dropped, so need to revalidate that the reset is 305 * still pending. 306 */ 307 if (p->state == CXL_CONFIG_RESET_PENDING) { 308 rc = cxl_region_decode_reset(cxlr, p->interleave_ways); 309 /* 310 * Revert to committed since there may still be active 311 * decoders associated with this region, or move forward 312 * to active to mark the reset successful 313 */ 314 if (rc) 315 p->state = CXL_CONFIG_COMMIT; 316 else 317 p->state = CXL_CONFIG_ACTIVE; 318 } 319 } 320 321 out: 322 up_write(&cxl_region_rwsem); 323 324 if (rc) 325 return rc; 326 return len; 327 } 328 329 static ssize_t commit_show(struct device *dev, struct device_attribute *attr, 330 char *buf) 331 { 332 struct cxl_region *cxlr = to_cxl_region(dev); 333 struct cxl_region_params *p = &cxlr->params; 334 ssize_t rc; 335 336 rc = down_read_interruptible(&cxl_region_rwsem); 337 if (rc) 338 return rc; 339 rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); 340 up_read(&cxl_region_rwsem); 341 342 return rc; 343 } 344 static DEVICE_ATTR_RW(commit); 345 346 static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a, 347 int n) 348 { 349 struct device *dev = kobj_to_dev(kobj); 350 struct cxl_region *cxlr = to_cxl_region(dev); 351 352 /* 353 * Support tooling that expects to find a 'uuid' attribute for all 354 * regions regardless of mode. 355 */ 356 if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM) 357 return 0444; 358 return a->mode; 359 } 360 361 static ssize_t interleave_ways_show(struct device *dev, 362 struct device_attribute *attr, char *buf) 363 { 364 struct cxl_region *cxlr = to_cxl_region(dev); 365 struct cxl_region_params *p = &cxlr->params; 366 ssize_t rc; 367 368 rc = down_read_interruptible(&cxl_region_rwsem); 369 if (rc) 370 return rc; 371 rc = sysfs_emit(buf, "%d\n", p->interleave_ways); 372 up_read(&cxl_region_rwsem); 373 374 return rc; 375 } 376 377 static const struct attribute_group *get_cxl_region_target_group(void); 378 379 static ssize_t interleave_ways_store(struct device *dev, 380 struct device_attribute *attr, 381 const char *buf, size_t len) 382 { 383 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); 384 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; 385 struct cxl_region *cxlr = to_cxl_region(dev); 386 struct cxl_region_params *p = &cxlr->params; 387 unsigned int val, save; 388 int rc; 389 u8 iw; 390 391 rc = kstrtouint(buf, 0, &val); 392 if (rc) 393 return rc; 394 395 rc = ways_to_eiw(val, &iw); 396 if (rc) 397 return rc; 398 399 /* 400 * Even for x3, x6, and x12 interleaves the region interleave must be a 401 * power of 2 multiple of the host bridge interleave. 402 */ 403 if (!is_power_of_2(val / cxld->interleave_ways) || 404 (val % cxld->interleave_ways)) { 405 dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val); 406 return -EINVAL; 407 } 408 409 rc = down_write_killable(&cxl_region_rwsem); 410 if (rc) 411 return rc; 412 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { 413 rc = -EBUSY; 414 goto out; 415 } 416 417 save = p->interleave_ways; 418 p->interleave_ways = val; 419 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); 420 if (rc) 421 p->interleave_ways = save; 422 out: 423 up_write(&cxl_region_rwsem); 424 if (rc) 425 return rc; 426 return len; 427 } 428 static DEVICE_ATTR_RW(interleave_ways); 429 430 static ssize_t interleave_granularity_show(struct device *dev, 431 struct device_attribute *attr, 432 char *buf) 433 { 434 struct cxl_region *cxlr = to_cxl_region(dev); 435 struct cxl_region_params *p = &cxlr->params; 436 ssize_t rc; 437 438 rc = down_read_interruptible(&cxl_region_rwsem); 439 if (rc) 440 return rc; 441 rc = sysfs_emit(buf, "%d\n", p->interleave_granularity); 442 up_read(&cxl_region_rwsem); 443 444 return rc; 445 } 446 447 static ssize_t interleave_granularity_store(struct device *dev, 448 struct device_attribute *attr, 449 const char *buf, size_t len) 450 { 451 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); 452 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; 453 struct cxl_region *cxlr = to_cxl_region(dev); 454 struct cxl_region_params *p = &cxlr->params; 455 int rc, val; 456 u16 ig; 457 458 rc = kstrtoint(buf, 0, &val); 459 if (rc) 460 return rc; 461 462 rc = granularity_to_eig(val, &ig); 463 if (rc) 464 return rc; 465 466 /* 467 * When the host-bridge is interleaved, disallow region granularity != 468 * root granularity. Regions with a granularity less than the root 469 * interleave result in needing multiple endpoints to support a single 470 * slot in the interleave (possible to support in the future). Regions 471 * with a granularity greater than the root interleave result in invalid 472 * DPA translations (invalid to support). 473 */ 474 if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity) 475 return -EINVAL; 476 477 rc = down_write_killable(&cxl_region_rwsem); 478 if (rc) 479 return rc; 480 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { 481 rc = -EBUSY; 482 goto out; 483 } 484 485 p->interleave_granularity = val; 486 out: 487 up_write(&cxl_region_rwsem); 488 if (rc) 489 return rc; 490 return len; 491 } 492 static DEVICE_ATTR_RW(interleave_granularity); 493 494 static ssize_t resource_show(struct device *dev, struct device_attribute *attr, 495 char *buf) 496 { 497 struct cxl_region *cxlr = to_cxl_region(dev); 498 struct cxl_region_params *p = &cxlr->params; 499 u64 resource = -1ULL; 500 ssize_t rc; 501 502 rc = down_read_interruptible(&cxl_region_rwsem); 503 if (rc) 504 return rc; 505 if (p->res) 506 resource = p->res->start; 507 rc = sysfs_emit(buf, "%#llx\n", resource); 508 up_read(&cxl_region_rwsem); 509 510 return rc; 511 } 512 static DEVICE_ATTR_RO(resource); 513 514 static ssize_t mode_show(struct device *dev, struct device_attribute *attr, 515 char *buf) 516 { 517 struct cxl_region *cxlr = to_cxl_region(dev); 518 519 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode)); 520 } 521 static DEVICE_ATTR_RO(mode); 522 523 static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size) 524 { 525 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); 526 struct cxl_region_params *p = &cxlr->params; 527 struct resource *res; 528 u64 remainder = 0; 529 530 lockdep_assert_held_write(&cxl_region_rwsem); 531 532 /* Nothing to do... */ 533 if (p->res && resource_size(p->res) == size) 534 return 0; 535 536 /* To change size the old size must be freed first */ 537 if (p->res) 538 return -EBUSY; 539 540 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) 541 return -EBUSY; 542 543 /* ways, granularity and uuid (if PMEM) need to be set before HPA */ 544 if (!p->interleave_ways || !p->interleave_granularity || 545 (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid))) 546 return -ENXIO; 547 548 div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder); 549 if (remainder) 550 return -EINVAL; 551 552 res = alloc_free_mem_region(cxlrd->res, size, SZ_256M, 553 dev_name(&cxlr->dev)); 554 if (IS_ERR(res)) { 555 dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n", 556 PTR_ERR(res)); 557 return PTR_ERR(res); 558 } 559 560 p->res = res; 561 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; 562 563 return 0; 564 } 565 566 static void cxl_region_iomem_release(struct cxl_region *cxlr) 567 { 568 struct cxl_region_params *p = &cxlr->params; 569 570 if (device_is_registered(&cxlr->dev)) 571 lockdep_assert_held_write(&cxl_region_rwsem); 572 if (p->res) { 573 /* 574 * Autodiscovered regions may not have been able to insert their 575 * resource. 576 */ 577 if (p->res->parent) 578 remove_resource(p->res); 579 kfree(p->res); 580 p->res = NULL; 581 } 582 } 583 584 static int free_hpa(struct cxl_region *cxlr) 585 { 586 struct cxl_region_params *p = &cxlr->params; 587 588 lockdep_assert_held_write(&cxl_region_rwsem); 589 590 if (!p->res) 591 return 0; 592 593 if (p->state >= CXL_CONFIG_ACTIVE) 594 return -EBUSY; 595 596 cxl_region_iomem_release(cxlr); 597 p->state = CXL_CONFIG_IDLE; 598 return 0; 599 } 600 601 static ssize_t size_store(struct device *dev, struct device_attribute *attr, 602 const char *buf, size_t len) 603 { 604 struct cxl_region *cxlr = to_cxl_region(dev); 605 u64 val; 606 int rc; 607 608 rc = kstrtou64(buf, 0, &val); 609 if (rc) 610 return rc; 611 612 rc = down_write_killable(&cxl_region_rwsem); 613 if (rc) 614 return rc; 615 616 if (val) 617 rc = alloc_hpa(cxlr, val); 618 else 619 rc = free_hpa(cxlr); 620 up_write(&cxl_region_rwsem); 621 622 if (rc) 623 return rc; 624 625 return len; 626 } 627 628 static ssize_t size_show(struct device *dev, struct device_attribute *attr, 629 char *buf) 630 { 631 struct cxl_region *cxlr = to_cxl_region(dev); 632 struct cxl_region_params *p = &cxlr->params; 633 u64 size = 0; 634 ssize_t rc; 635 636 rc = down_read_interruptible(&cxl_region_rwsem); 637 if (rc) 638 return rc; 639 if (p->res) 640 size = resource_size(p->res); 641 rc = sysfs_emit(buf, "%#llx\n", size); 642 up_read(&cxl_region_rwsem); 643 644 return rc; 645 } 646 static DEVICE_ATTR_RW(size); 647 648 static struct attribute *cxl_region_attrs[] = { 649 &dev_attr_uuid.attr, 650 &dev_attr_commit.attr, 651 &dev_attr_interleave_ways.attr, 652 &dev_attr_interleave_granularity.attr, 653 &dev_attr_resource.attr, 654 &dev_attr_size.attr, 655 &dev_attr_mode.attr, 656 NULL, 657 }; 658 659 static const struct attribute_group cxl_region_group = { 660 .attrs = cxl_region_attrs, 661 .is_visible = cxl_region_visible, 662 }; 663 664 static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos) 665 { 666 struct cxl_region_params *p = &cxlr->params; 667 struct cxl_endpoint_decoder *cxled; 668 int rc; 669 670 rc = down_read_interruptible(&cxl_region_rwsem); 671 if (rc) 672 return rc; 673 674 if (pos >= p->interleave_ways) { 675 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, 676 p->interleave_ways); 677 rc = -ENXIO; 678 goto out; 679 } 680 681 cxled = p->targets[pos]; 682 if (!cxled) 683 rc = sysfs_emit(buf, "\n"); 684 else 685 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev)); 686 out: 687 up_read(&cxl_region_rwsem); 688 689 return rc; 690 } 691 692 static int match_free_decoder(struct device *dev, void *data) 693 { 694 struct cxl_decoder *cxld; 695 int *id = data; 696 697 if (!is_switch_decoder(dev)) 698 return 0; 699 700 cxld = to_cxl_decoder(dev); 701 702 /* enforce ordered allocation */ 703 if (cxld->id != *id) 704 return 0; 705 706 if (!cxld->region) 707 return 1; 708 709 (*id)++; 710 711 return 0; 712 } 713 714 static int match_auto_decoder(struct device *dev, void *data) 715 { 716 struct cxl_region_params *p = data; 717 struct cxl_decoder *cxld; 718 struct range *r; 719 720 if (!is_switch_decoder(dev)) 721 return 0; 722 723 cxld = to_cxl_decoder(dev); 724 r = &cxld->hpa_range; 725 726 if (p->res && p->res->start == r->start && p->res->end == r->end) 727 return 1; 728 729 return 0; 730 } 731 732 static struct cxl_decoder * 733 cxl_region_find_decoder(struct cxl_port *port, 734 struct cxl_endpoint_decoder *cxled, 735 struct cxl_region *cxlr) 736 { 737 struct device *dev; 738 int id = 0; 739 740 if (port == cxled_to_port(cxled)) 741 return &cxled->cxld; 742 743 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) 744 dev = device_find_child(&port->dev, &cxlr->params, 745 match_auto_decoder); 746 else 747 dev = device_find_child(&port->dev, &id, match_free_decoder); 748 if (!dev) 749 return NULL; 750 /* 751 * This decoder is pinned registered as long as the endpoint decoder is 752 * registered, and endpoint decoder unregistration holds the 753 * cxl_region_rwsem over unregister events, so no need to hold on to 754 * this extra reference. 755 */ 756 put_device(dev); 757 return to_cxl_decoder(dev); 758 } 759 760 static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port, 761 struct cxl_region *cxlr) 762 { 763 struct cxl_region_params *p = &cxlr->params; 764 struct cxl_region_ref *cxl_rr, *iter; 765 unsigned long index; 766 int rc; 767 768 xa_for_each(&port->regions, index, iter) { 769 struct cxl_region_params *ip = &iter->region->params; 770 771 if (!ip->res) 772 continue; 773 774 if (ip->res->start > p->res->start) { 775 dev_dbg(&cxlr->dev, 776 "%s: HPA order violation %s:%pr vs %pr\n", 777 dev_name(&port->dev), 778 dev_name(&iter->region->dev), ip->res, p->res); 779 return ERR_PTR(-EBUSY); 780 } 781 } 782 783 cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL); 784 if (!cxl_rr) 785 return ERR_PTR(-ENOMEM); 786 cxl_rr->port = port; 787 cxl_rr->region = cxlr; 788 cxl_rr->nr_targets = 1; 789 xa_init(&cxl_rr->endpoints); 790 791 rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL); 792 if (rc) { 793 dev_dbg(&cxlr->dev, 794 "%s: failed to track region reference: %d\n", 795 dev_name(&port->dev), rc); 796 kfree(cxl_rr); 797 return ERR_PTR(rc); 798 } 799 800 return cxl_rr; 801 } 802 803 static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr) 804 { 805 struct cxl_region *cxlr = cxl_rr->region; 806 struct cxl_decoder *cxld = cxl_rr->decoder; 807 808 if (!cxld) 809 return; 810 811 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n"); 812 if (cxld->region == cxlr) { 813 cxld->region = NULL; 814 put_device(&cxlr->dev); 815 } 816 } 817 818 static void free_region_ref(struct cxl_region_ref *cxl_rr) 819 { 820 struct cxl_port *port = cxl_rr->port; 821 struct cxl_region *cxlr = cxl_rr->region; 822 823 cxl_rr_free_decoder(cxl_rr); 824 xa_erase(&port->regions, (unsigned long)cxlr); 825 xa_destroy(&cxl_rr->endpoints); 826 kfree(cxl_rr); 827 } 828 829 static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr, 830 struct cxl_endpoint_decoder *cxled) 831 { 832 int rc; 833 struct cxl_port *port = cxl_rr->port; 834 struct cxl_region *cxlr = cxl_rr->region; 835 struct cxl_decoder *cxld = cxl_rr->decoder; 836 struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled)); 837 838 if (ep) { 839 rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep, 840 GFP_KERNEL); 841 if (rc) 842 return rc; 843 } 844 cxl_rr->nr_eps++; 845 846 if (!cxld->region) { 847 cxld->region = cxlr; 848 get_device(&cxlr->dev); 849 } 850 851 return 0; 852 } 853 854 static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr, 855 struct cxl_endpoint_decoder *cxled, 856 struct cxl_region_ref *cxl_rr) 857 { 858 struct cxl_decoder *cxld; 859 860 cxld = cxl_region_find_decoder(port, cxled, cxlr); 861 if (!cxld) { 862 dev_dbg(&cxlr->dev, "%s: no decoder available\n", 863 dev_name(&port->dev)); 864 return -EBUSY; 865 } 866 867 if (cxld->region) { 868 dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", 869 dev_name(&port->dev), dev_name(&cxld->dev), 870 dev_name(&cxld->region->dev)); 871 return -EBUSY; 872 } 873 874 /* 875 * Endpoints should already match the region type, but backstop that 876 * assumption with an assertion. Switch-decoders change mapping-type 877 * based on what is mapped when they are assigned to a region. 878 */ 879 dev_WARN_ONCE(&cxlr->dev, 880 port == cxled_to_port(cxled) && 881 cxld->target_type != cxlr->type, 882 "%s:%s mismatch decoder type %d -> %d\n", 883 dev_name(&cxled_to_memdev(cxled)->dev), 884 dev_name(&cxld->dev), cxld->target_type, cxlr->type); 885 cxld->target_type = cxlr->type; 886 cxl_rr->decoder = cxld; 887 return 0; 888 } 889 890 /** 891 * cxl_port_attach_region() - track a region's interest in a port by endpoint 892 * @port: port to add a new region reference 'struct cxl_region_ref' 893 * @cxlr: region to attach to @port 894 * @cxled: endpoint decoder used to create or further pin a region reference 895 * @pos: interleave position of @cxled in @cxlr 896 * 897 * The attach event is an opportunity to validate CXL decode setup 898 * constraints and record metadata needed for programming HDM decoders, 899 * in particular decoder target lists. 900 * 901 * The steps are: 902 * 903 * - validate that there are no other regions with a higher HPA already 904 * associated with @port 905 * - establish a region reference if one is not already present 906 * 907 * - additionally allocate a decoder instance that will host @cxlr on 908 * @port 909 * 910 * - pin the region reference by the endpoint 911 * - account for how many entries in @port's target list are needed to 912 * cover all of the added endpoints. 913 */ 914 static int cxl_port_attach_region(struct cxl_port *port, 915 struct cxl_region *cxlr, 916 struct cxl_endpoint_decoder *cxled, int pos) 917 { 918 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 919 struct cxl_ep *ep = cxl_ep_load(port, cxlmd); 920 struct cxl_region_ref *cxl_rr; 921 bool nr_targets_inc = false; 922 struct cxl_decoder *cxld; 923 unsigned long index; 924 int rc = -EBUSY; 925 926 lockdep_assert_held_write(&cxl_region_rwsem); 927 928 cxl_rr = cxl_rr_load(port, cxlr); 929 if (cxl_rr) { 930 struct cxl_ep *ep_iter; 931 int found = 0; 932 933 /* 934 * Walk the existing endpoints that have been attached to 935 * @cxlr at @port and see if they share the same 'next' port 936 * in the downstream direction. I.e. endpoints that share common 937 * upstream switch. 938 */ 939 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { 940 if (ep_iter == ep) 941 continue; 942 if (ep_iter->next == ep->next) { 943 found++; 944 break; 945 } 946 } 947 948 /* 949 * New target port, or @port is an endpoint port that always 950 * accounts its own local decode as a target. 951 */ 952 if (!found || !ep->next) { 953 cxl_rr->nr_targets++; 954 nr_targets_inc = true; 955 } 956 } else { 957 cxl_rr = alloc_region_ref(port, cxlr); 958 if (IS_ERR(cxl_rr)) { 959 dev_dbg(&cxlr->dev, 960 "%s: failed to allocate region reference\n", 961 dev_name(&port->dev)); 962 return PTR_ERR(cxl_rr); 963 } 964 nr_targets_inc = true; 965 966 rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr); 967 if (rc) 968 goto out_erase; 969 } 970 cxld = cxl_rr->decoder; 971 972 rc = cxl_rr_ep_add(cxl_rr, cxled); 973 if (rc) { 974 dev_dbg(&cxlr->dev, 975 "%s: failed to track endpoint %s:%s reference\n", 976 dev_name(&port->dev), dev_name(&cxlmd->dev), 977 dev_name(&cxld->dev)); 978 goto out_erase; 979 } 980 981 dev_dbg(&cxlr->dev, 982 "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n", 983 dev_name(port->uport_dev), dev_name(&port->dev), 984 dev_name(&cxld->dev), dev_name(&cxlmd->dev), 985 dev_name(&cxled->cxld.dev), pos, 986 ep ? ep->next ? dev_name(ep->next->uport_dev) : 987 dev_name(&cxlmd->dev) : 988 "none", 989 cxl_rr->nr_eps, cxl_rr->nr_targets); 990 991 return 0; 992 out_erase: 993 if (nr_targets_inc) 994 cxl_rr->nr_targets--; 995 if (cxl_rr->nr_eps == 0) 996 free_region_ref(cxl_rr); 997 return rc; 998 } 999 1000 static void cxl_port_detach_region(struct cxl_port *port, 1001 struct cxl_region *cxlr, 1002 struct cxl_endpoint_decoder *cxled) 1003 { 1004 struct cxl_region_ref *cxl_rr; 1005 struct cxl_ep *ep = NULL; 1006 1007 lockdep_assert_held_write(&cxl_region_rwsem); 1008 1009 cxl_rr = cxl_rr_load(port, cxlr); 1010 if (!cxl_rr) 1011 return; 1012 1013 /* 1014 * Endpoint ports do not carry cxl_ep references, and they 1015 * never target more than one endpoint by definition 1016 */ 1017 if (cxl_rr->decoder == &cxled->cxld) 1018 cxl_rr->nr_eps--; 1019 else 1020 ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled); 1021 if (ep) { 1022 struct cxl_ep *ep_iter; 1023 unsigned long index; 1024 int found = 0; 1025 1026 cxl_rr->nr_eps--; 1027 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { 1028 if (ep_iter->next == ep->next) { 1029 found++; 1030 break; 1031 } 1032 } 1033 if (!found) 1034 cxl_rr->nr_targets--; 1035 } 1036 1037 if (cxl_rr->nr_eps == 0) 1038 free_region_ref(cxl_rr); 1039 } 1040 1041 static int check_last_peer(struct cxl_endpoint_decoder *cxled, 1042 struct cxl_ep *ep, struct cxl_region_ref *cxl_rr, 1043 int distance) 1044 { 1045 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 1046 struct cxl_region *cxlr = cxl_rr->region; 1047 struct cxl_region_params *p = &cxlr->params; 1048 struct cxl_endpoint_decoder *cxled_peer; 1049 struct cxl_port *port = cxl_rr->port; 1050 struct cxl_memdev *cxlmd_peer; 1051 struct cxl_ep *ep_peer; 1052 int pos = cxled->pos; 1053 1054 /* 1055 * If this position wants to share a dport with the last endpoint mapped 1056 * then that endpoint, at index 'position - distance', must also be 1057 * mapped by this dport. 1058 */ 1059 if (pos < distance) { 1060 dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n", 1061 dev_name(port->uport_dev), dev_name(&port->dev), 1062 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); 1063 return -ENXIO; 1064 } 1065 cxled_peer = p->targets[pos - distance]; 1066 cxlmd_peer = cxled_to_memdev(cxled_peer); 1067 ep_peer = cxl_ep_load(port, cxlmd_peer); 1068 if (ep->dport != ep_peer->dport) { 1069 dev_dbg(&cxlr->dev, 1070 "%s:%s: %s:%s pos %d mismatched peer %s:%s\n", 1071 dev_name(port->uport_dev), dev_name(&port->dev), 1072 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos, 1073 dev_name(&cxlmd_peer->dev), 1074 dev_name(&cxled_peer->cxld.dev)); 1075 return -ENXIO; 1076 } 1077 1078 return 0; 1079 } 1080 1081 static int cxl_port_setup_targets(struct cxl_port *port, 1082 struct cxl_region *cxlr, 1083 struct cxl_endpoint_decoder *cxled) 1084 { 1085 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); 1086 int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos; 1087 struct cxl_port *parent_port = to_cxl_port(port->dev.parent); 1088 struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr); 1089 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 1090 struct cxl_ep *ep = cxl_ep_load(port, cxlmd); 1091 struct cxl_region_params *p = &cxlr->params; 1092 struct cxl_decoder *cxld = cxl_rr->decoder; 1093 struct cxl_switch_decoder *cxlsd; 1094 u16 eig, peig; 1095 u8 eiw, peiw; 1096 1097 /* 1098 * While root level decoders support x3, x6, x12, switch level 1099 * decoders only support powers of 2 up to x16. 1100 */ 1101 if (!is_power_of_2(cxl_rr->nr_targets)) { 1102 dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n", 1103 dev_name(port->uport_dev), dev_name(&port->dev), 1104 cxl_rr->nr_targets); 1105 return -EINVAL; 1106 } 1107 1108 cxlsd = to_cxl_switch_decoder(&cxld->dev); 1109 if (cxl_rr->nr_targets_set) { 1110 int i, distance; 1111 1112 /* 1113 * Passthrough decoders impose no distance requirements between 1114 * peers 1115 */ 1116 if (cxl_rr->nr_targets == 1) 1117 distance = 0; 1118 else 1119 distance = p->nr_targets / cxl_rr->nr_targets; 1120 for (i = 0; i < cxl_rr->nr_targets_set; i++) 1121 if (ep->dport == cxlsd->target[i]) { 1122 rc = check_last_peer(cxled, ep, cxl_rr, 1123 distance); 1124 if (rc) 1125 return rc; 1126 goto out_target_set; 1127 } 1128 goto add_target; 1129 } 1130 1131 if (is_cxl_root(parent_port)) { 1132 /* 1133 * Root decoder IG is always set to value in CFMWS which 1134 * may be different than this region's IG. We can use the 1135 * region's IG here since interleave_granularity_store() 1136 * does not allow interleaved host-bridges with 1137 * root IG != region IG. 1138 */ 1139 parent_ig = p->interleave_granularity; 1140 parent_iw = cxlrd->cxlsd.cxld.interleave_ways; 1141 /* 1142 * For purposes of address bit routing, use power-of-2 math for 1143 * switch ports. 1144 */ 1145 if (!is_power_of_2(parent_iw)) 1146 parent_iw /= 3; 1147 } else { 1148 struct cxl_region_ref *parent_rr; 1149 struct cxl_decoder *parent_cxld; 1150 1151 parent_rr = cxl_rr_load(parent_port, cxlr); 1152 parent_cxld = parent_rr->decoder; 1153 parent_ig = parent_cxld->interleave_granularity; 1154 parent_iw = parent_cxld->interleave_ways; 1155 } 1156 1157 rc = granularity_to_eig(parent_ig, &peig); 1158 if (rc) { 1159 dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n", 1160 dev_name(parent_port->uport_dev), 1161 dev_name(&parent_port->dev), parent_ig); 1162 return rc; 1163 } 1164 1165 rc = ways_to_eiw(parent_iw, &peiw); 1166 if (rc) { 1167 dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n", 1168 dev_name(parent_port->uport_dev), 1169 dev_name(&parent_port->dev), parent_iw); 1170 return rc; 1171 } 1172 1173 iw = cxl_rr->nr_targets; 1174 rc = ways_to_eiw(iw, &eiw); 1175 if (rc) { 1176 dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n", 1177 dev_name(port->uport_dev), dev_name(&port->dev), iw); 1178 return rc; 1179 } 1180 1181 /* 1182 * Interleave granularity is a multiple of @parent_port granularity. 1183 * Multiplier is the parent port interleave ways. 1184 */ 1185 rc = granularity_to_eig(parent_ig * parent_iw, &eig); 1186 if (rc) { 1187 dev_dbg(&cxlr->dev, 1188 "%s: invalid granularity calculation (%d * %d)\n", 1189 dev_name(&parent_port->dev), parent_ig, parent_iw); 1190 return rc; 1191 } 1192 1193 rc = eig_to_granularity(eig, &ig); 1194 if (rc) { 1195 dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n", 1196 dev_name(port->uport_dev), dev_name(&port->dev), 1197 256 << eig); 1198 return rc; 1199 } 1200 1201 if (iw > 8 || iw > cxlsd->nr_targets) { 1202 dev_dbg(&cxlr->dev, 1203 "%s:%s:%s: ways: %d overflows targets: %d\n", 1204 dev_name(port->uport_dev), dev_name(&port->dev), 1205 dev_name(&cxld->dev), iw, cxlsd->nr_targets); 1206 return -ENXIO; 1207 } 1208 1209 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { 1210 if (cxld->interleave_ways != iw || 1211 cxld->interleave_granularity != ig || 1212 cxld->hpa_range.start != p->res->start || 1213 cxld->hpa_range.end != p->res->end || 1214 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { 1215 dev_err(&cxlr->dev, 1216 "%s:%s %s expected iw: %d ig: %d %pr\n", 1217 dev_name(port->uport_dev), dev_name(&port->dev), 1218 __func__, iw, ig, p->res); 1219 dev_err(&cxlr->dev, 1220 "%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n", 1221 dev_name(port->uport_dev), dev_name(&port->dev), 1222 __func__, cxld->interleave_ways, 1223 cxld->interleave_granularity, 1224 (cxld->flags & CXL_DECODER_F_ENABLE) ? 1225 "enabled" : 1226 "disabled", 1227 cxld->hpa_range.start, cxld->hpa_range.end); 1228 return -ENXIO; 1229 } 1230 } else { 1231 cxld->interleave_ways = iw; 1232 cxld->interleave_granularity = ig; 1233 cxld->hpa_range = (struct range) { 1234 .start = p->res->start, 1235 .end = p->res->end, 1236 }; 1237 } 1238 dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev), 1239 dev_name(&port->dev), iw, ig); 1240 add_target: 1241 if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) { 1242 dev_dbg(&cxlr->dev, 1243 "%s:%s: targets full trying to add %s:%s at %d\n", 1244 dev_name(port->uport_dev), dev_name(&port->dev), 1245 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); 1246 return -ENXIO; 1247 } 1248 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { 1249 if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) { 1250 dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n", 1251 dev_name(port->uport_dev), dev_name(&port->dev), 1252 dev_name(&cxlsd->cxld.dev), 1253 dev_name(ep->dport->dport_dev), 1254 cxl_rr->nr_targets_set); 1255 return -ENXIO; 1256 } 1257 } else 1258 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport; 1259 inc = 1; 1260 out_target_set: 1261 cxl_rr->nr_targets_set += inc; 1262 dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n", 1263 dev_name(port->uport_dev), dev_name(&port->dev), 1264 cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev), 1265 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); 1266 1267 return 0; 1268 } 1269 1270 static void cxl_port_reset_targets(struct cxl_port *port, 1271 struct cxl_region *cxlr) 1272 { 1273 struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr); 1274 struct cxl_decoder *cxld; 1275 1276 /* 1277 * After the last endpoint has been detached the entire cxl_rr may now 1278 * be gone. 1279 */ 1280 if (!cxl_rr) 1281 return; 1282 cxl_rr->nr_targets_set = 0; 1283 1284 cxld = cxl_rr->decoder; 1285 cxld->hpa_range = (struct range) { 1286 .start = 0, 1287 .end = -1, 1288 }; 1289 } 1290 1291 static void cxl_region_teardown_targets(struct cxl_region *cxlr) 1292 { 1293 struct cxl_region_params *p = &cxlr->params; 1294 struct cxl_endpoint_decoder *cxled; 1295 struct cxl_dev_state *cxlds; 1296 struct cxl_memdev *cxlmd; 1297 struct cxl_port *iter; 1298 struct cxl_ep *ep; 1299 int i; 1300 1301 /* 1302 * In the auto-discovery case skip automatic teardown since the 1303 * address space is already active 1304 */ 1305 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) 1306 return; 1307 1308 for (i = 0; i < p->nr_targets; i++) { 1309 cxled = p->targets[i]; 1310 cxlmd = cxled_to_memdev(cxled); 1311 cxlds = cxlmd->cxlds; 1312 1313 if (cxlds->rcd) 1314 continue; 1315 1316 iter = cxled_to_port(cxled); 1317 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) 1318 iter = to_cxl_port(iter->dev.parent); 1319 1320 for (ep = cxl_ep_load(iter, cxlmd); iter; 1321 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) 1322 cxl_port_reset_targets(iter, cxlr); 1323 } 1324 } 1325 1326 static int cxl_region_setup_targets(struct cxl_region *cxlr) 1327 { 1328 struct cxl_region_params *p = &cxlr->params; 1329 struct cxl_endpoint_decoder *cxled; 1330 struct cxl_dev_state *cxlds; 1331 int i, rc, rch = 0, vh = 0; 1332 struct cxl_memdev *cxlmd; 1333 struct cxl_port *iter; 1334 struct cxl_ep *ep; 1335 1336 for (i = 0; i < p->nr_targets; i++) { 1337 cxled = p->targets[i]; 1338 cxlmd = cxled_to_memdev(cxled); 1339 cxlds = cxlmd->cxlds; 1340 1341 /* validate that all targets agree on topology */ 1342 if (!cxlds->rcd) { 1343 vh++; 1344 } else { 1345 rch++; 1346 continue; 1347 } 1348 1349 iter = cxled_to_port(cxled); 1350 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) 1351 iter = to_cxl_port(iter->dev.parent); 1352 1353 /* 1354 * Descend the topology tree programming / validating 1355 * targets while looking for conflicts. 1356 */ 1357 for (ep = cxl_ep_load(iter, cxlmd); iter; 1358 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { 1359 rc = cxl_port_setup_targets(iter, cxlr, cxled); 1360 if (rc) { 1361 cxl_region_teardown_targets(cxlr); 1362 return rc; 1363 } 1364 } 1365 } 1366 1367 if (rch && vh) { 1368 dev_err(&cxlr->dev, "mismatched CXL topologies detected\n"); 1369 cxl_region_teardown_targets(cxlr); 1370 return -ENXIO; 1371 } 1372 1373 return 0; 1374 } 1375 1376 static int cxl_region_validate_position(struct cxl_region *cxlr, 1377 struct cxl_endpoint_decoder *cxled, 1378 int pos) 1379 { 1380 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 1381 struct cxl_region_params *p = &cxlr->params; 1382 int i; 1383 1384 if (pos < 0 || pos >= p->interleave_ways) { 1385 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, 1386 p->interleave_ways); 1387 return -ENXIO; 1388 } 1389 1390 if (p->targets[pos] == cxled) 1391 return 0; 1392 1393 if (p->targets[pos]) { 1394 struct cxl_endpoint_decoder *cxled_target = p->targets[pos]; 1395 struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target); 1396 1397 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n", 1398 pos, dev_name(&cxlmd_target->dev), 1399 dev_name(&cxled_target->cxld.dev)); 1400 return -EBUSY; 1401 } 1402 1403 for (i = 0; i < p->interleave_ways; i++) { 1404 struct cxl_endpoint_decoder *cxled_target; 1405 struct cxl_memdev *cxlmd_target; 1406 1407 cxled_target = p->targets[i]; 1408 if (!cxled_target) 1409 continue; 1410 1411 cxlmd_target = cxled_to_memdev(cxled_target); 1412 if (cxlmd_target == cxlmd) { 1413 dev_dbg(&cxlr->dev, 1414 "%s already specified at position %d via: %s\n", 1415 dev_name(&cxlmd->dev), pos, 1416 dev_name(&cxled_target->cxld.dev)); 1417 return -EBUSY; 1418 } 1419 } 1420 1421 return 0; 1422 } 1423 1424 static int cxl_region_attach_position(struct cxl_region *cxlr, 1425 struct cxl_root_decoder *cxlrd, 1426 struct cxl_endpoint_decoder *cxled, 1427 const struct cxl_dport *dport, int pos) 1428 { 1429 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 1430 struct cxl_port *iter; 1431 int rc; 1432 1433 if (cxlrd->calc_hb(cxlrd, pos) != dport) { 1434 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n", 1435 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), 1436 dev_name(&cxlrd->cxlsd.cxld.dev)); 1437 return -ENXIO; 1438 } 1439 1440 for (iter = cxled_to_port(cxled); !is_cxl_root(iter); 1441 iter = to_cxl_port(iter->dev.parent)) { 1442 rc = cxl_port_attach_region(iter, cxlr, cxled, pos); 1443 if (rc) 1444 goto err; 1445 } 1446 1447 return 0; 1448 1449 err: 1450 for (iter = cxled_to_port(cxled); !is_cxl_root(iter); 1451 iter = to_cxl_port(iter->dev.parent)) 1452 cxl_port_detach_region(iter, cxlr, cxled); 1453 return rc; 1454 } 1455 1456 static int cxl_region_attach_auto(struct cxl_region *cxlr, 1457 struct cxl_endpoint_decoder *cxled, int pos) 1458 { 1459 struct cxl_region_params *p = &cxlr->params; 1460 1461 if (cxled->state != CXL_DECODER_STATE_AUTO) { 1462 dev_err(&cxlr->dev, 1463 "%s: unable to add decoder to autodetected region\n", 1464 dev_name(&cxled->cxld.dev)); 1465 return -EINVAL; 1466 } 1467 1468 if (pos >= 0) { 1469 dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n", 1470 dev_name(&cxled->cxld.dev), pos); 1471 return -EINVAL; 1472 } 1473 1474 if (p->nr_targets >= p->interleave_ways) { 1475 dev_err(&cxlr->dev, "%s: no more target slots available\n", 1476 dev_name(&cxled->cxld.dev)); 1477 return -ENXIO; 1478 } 1479 1480 /* 1481 * Temporarily record the endpoint decoder into the target array. Yes, 1482 * this means that userspace can view devices in the wrong position 1483 * before the region activates, and must be careful to understand when 1484 * it might be racing region autodiscovery. 1485 */ 1486 pos = p->nr_targets; 1487 p->targets[pos] = cxled; 1488 cxled->pos = pos; 1489 p->nr_targets++; 1490 1491 return 0; 1492 } 1493 1494 static int cmp_interleave_pos(const void *a, const void *b) 1495 { 1496 struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a; 1497 struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b; 1498 1499 return cxled_a->pos - cxled_b->pos; 1500 } 1501 1502 static struct cxl_port *next_port(struct cxl_port *port) 1503 { 1504 if (!port->parent_dport) 1505 return NULL; 1506 return port->parent_dport->port; 1507 } 1508 1509 static int match_switch_decoder_by_range(struct device *dev, void *data) 1510 { 1511 struct cxl_switch_decoder *cxlsd; 1512 struct range *r1, *r2 = data; 1513 1514 if (!is_switch_decoder(dev)) 1515 return 0; 1516 1517 cxlsd = to_cxl_switch_decoder(dev); 1518 r1 = &cxlsd->cxld.hpa_range; 1519 1520 if (is_root_decoder(dev)) 1521 return range_contains(r1, r2); 1522 return (r1->start == r2->start && r1->end == r2->end); 1523 } 1524 1525 static int find_pos_and_ways(struct cxl_port *port, struct range *range, 1526 int *pos, int *ways) 1527 { 1528 struct cxl_switch_decoder *cxlsd; 1529 struct cxl_port *parent; 1530 struct device *dev; 1531 int rc = -ENXIO; 1532 1533 parent = next_port(port); 1534 if (!parent) 1535 return rc; 1536 1537 dev = device_find_child(&parent->dev, range, 1538 match_switch_decoder_by_range); 1539 if (!dev) { 1540 dev_err(port->uport_dev, 1541 "failed to find decoder mapping %#llx-%#llx\n", 1542 range->start, range->end); 1543 return rc; 1544 } 1545 cxlsd = to_cxl_switch_decoder(dev); 1546 *ways = cxlsd->cxld.interleave_ways; 1547 1548 for (int i = 0; i < *ways; i++) { 1549 if (cxlsd->target[i] == port->parent_dport) { 1550 *pos = i; 1551 rc = 0; 1552 break; 1553 } 1554 } 1555 put_device(dev); 1556 1557 return rc; 1558 } 1559 1560 /** 1561 * cxl_calc_interleave_pos() - calculate an endpoint position in a region 1562 * @cxled: endpoint decoder member of given region 1563 * 1564 * The endpoint position is calculated by traversing the topology from 1565 * the endpoint to the root decoder and iteratively applying this 1566 * calculation: 1567 * 1568 * position = position * parent_ways + parent_pos; 1569 * 1570 * ...where @position is inferred from switch and root decoder target lists. 1571 * 1572 * Return: position >= 0 on success 1573 * -ENXIO on failure 1574 */ 1575 static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled) 1576 { 1577 struct cxl_port *iter, *port = cxled_to_port(cxled); 1578 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 1579 struct range *range = &cxled->cxld.hpa_range; 1580 int parent_ways = 0, parent_pos = 0, pos = 0; 1581 int rc; 1582 1583 /* 1584 * Example: the expected interleave order of the 4-way region shown 1585 * below is: mem0, mem2, mem1, mem3 1586 * 1587 * root_port 1588 * / \ 1589 * host_bridge_0 host_bridge_1 1590 * | | | | 1591 * mem0 mem1 mem2 mem3 1592 * 1593 * In the example the calculator will iterate twice. The first iteration 1594 * uses the mem position in the host-bridge and the ways of the host- 1595 * bridge to generate the first, or local, position. The second 1596 * iteration uses the host-bridge position in the root_port and the ways 1597 * of the root_port to refine the position. 1598 * 1599 * A trace of the calculation per endpoint looks like this: 1600 * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0 1601 * pos = 0 * 2 + 0 pos = 0 * 2 + 1 1602 * pos: 0 pos: 1 1603 * 1604 * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1 1605 * pos = 1 * 2 + 0 pos = 1 * 2 + 1 1606 * pos: 2 pos = 3 1607 * 1608 * Note that while this example is simple, the method applies to more 1609 * complex topologies, including those with switches. 1610 */ 1611 1612 /* Iterate from endpoint to root_port refining the position */ 1613 for (iter = port; iter; iter = next_port(iter)) { 1614 if (is_cxl_root(iter)) 1615 break; 1616 1617 rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways); 1618 if (rc) 1619 return rc; 1620 1621 pos = pos * parent_ways + parent_pos; 1622 } 1623 1624 dev_dbg(&cxlmd->dev, 1625 "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n", 1626 dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent), 1627 dev_name(&port->dev), range->start, range->end, pos); 1628 1629 return pos; 1630 } 1631 1632 static int cxl_region_sort_targets(struct cxl_region *cxlr) 1633 { 1634 struct cxl_region_params *p = &cxlr->params; 1635 int i, rc = 0; 1636 1637 for (i = 0; i < p->nr_targets; i++) { 1638 struct cxl_endpoint_decoder *cxled = p->targets[i]; 1639 1640 cxled->pos = cxl_calc_interleave_pos(cxled); 1641 /* 1642 * Record that sorting failed, but still continue to calc 1643 * cxled->pos so that follow-on code paths can reliably 1644 * do p->targets[cxled->pos] to self-reference their entry. 1645 */ 1646 if (cxled->pos < 0) 1647 rc = -ENXIO; 1648 } 1649 /* Keep the cxlr target list in interleave position order */ 1650 sort(p->targets, p->nr_targets, sizeof(p->targets[0]), 1651 cmp_interleave_pos, NULL); 1652 1653 dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful"); 1654 return rc; 1655 } 1656 1657 static int cxl_region_attach(struct cxl_region *cxlr, 1658 struct cxl_endpoint_decoder *cxled, int pos) 1659 { 1660 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); 1661 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 1662 struct cxl_region_params *p = &cxlr->params; 1663 struct cxl_port *ep_port, *root_port; 1664 struct cxl_dport *dport; 1665 int rc = -ENXIO; 1666 1667 if (cxled->mode != cxlr->mode) { 1668 dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n", 1669 dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode); 1670 return -EINVAL; 1671 } 1672 1673 if (cxled->mode == CXL_DECODER_DEAD) { 1674 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev)); 1675 return -ENODEV; 1676 } 1677 1678 /* all full of members, or interleave config not established? */ 1679 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) { 1680 dev_dbg(&cxlr->dev, "region already active\n"); 1681 return -EBUSY; 1682 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) { 1683 dev_dbg(&cxlr->dev, "interleave config missing\n"); 1684 return -ENXIO; 1685 } 1686 1687 if (p->nr_targets >= p->interleave_ways) { 1688 dev_dbg(&cxlr->dev, "region already has %d endpoints\n", 1689 p->nr_targets); 1690 return -EINVAL; 1691 } 1692 1693 ep_port = cxled_to_port(cxled); 1694 root_port = cxlrd_to_port(cxlrd); 1695 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge); 1696 if (!dport) { 1697 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n", 1698 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), 1699 dev_name(cxlr->dev.parent)); 1700 return -ENXIO; 1701 } 1702 1703 if (cxled->cxld.target_type != cxlr->type) { 1704 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n", 1705 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), 1706 cxled->cxld.target_type, cxlr->type); 1707 return -ENXIO; 1708 } 1709 1710 if (!cxled->dpa_res) { 1711 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n", 1712 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev)); 1713 return -ENXIO; 1714 } 1715 1716 if (resource_size(cxled->dpa_res) * p->interleave_ways != 1717 resource_size(p->res)) { 1718 dev_dbg(&cxlr->dev, 1719 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n", 1720 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), 1721 (u64)resource_size(cxled->dpa_res), p->interleave_ways, 1722 (u64)resource_size(p->res)); 1723 return -EINVAL; 1724 } 1725 1726 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { 1727 int i; 1728 1729 rc = cxl_region_attach_auto(cxlr, cxled, pos); 1730 if (rc) 1731 return rc; 1732 1733 /* await more targets to arrive... */ 1734 if (p->nr_targets < p->interleave_ways) 1735 return 0; 1736 1737 /* 1738 * All targets are here, which implies all PCI enumeration that 1739 * affects this region has been completed. Walk the topology to 1740 * sort the devices into their relative region decode position. 1741 */ 1742 rc = cxl_region_sort_targets(cxlr); 1743 if (rc) 1744 return rc; 1745 1746 for (i = 0; i < p->nr_targets; i++) { 1747 cxled = p->targets[i]; 1748 ep_port = cxled_to_port(cxled); 1749 dport = cxl_find_dport_by_dev(root_port, 1750 ep_port->host_bridge); 1751 rc = cxl_region_attach_position(cxlr, cxlrd, cxled, 1752 dport, i); 1753 if (rc) 1754 return rc; 1755 } 1756 1757 rc = cxl_region_setup_targets(cxlr); 1758 if (rc) 1759 return rc; 1760 1761 /* 1762 * If target setup succeeds in the autodiscovery case 1763 * then the region is already committed. 1764 */ 1765 p->state = CXL_CONFIG_COMMIT; 1766 1767 return 0; 1768 } 1769 1770 rc = cxl_region_validate_position(cxlr, cxled, pos); 1771 if (rc) 1772 return rc; 1773 1774 rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos); 1775 if (rc) 1776 return rc; 1777 1778 p->targets[pos] = cxled; 1779 cxled->pos = pos; 1780 p->nr_targets++; 1781 1782 if (p->nr_targets == p->interleave_ways) { 1783 rc = cxl_region_setup_targets(cxlr); 1784 if (rc) 1785 return rc; 1786 p->state = CXL_CONFIG_ACTIVE; 1787 } 1788 1789 cxled->cxld.interleave_ways = p->interleave_ways; 1790 cxled->cxld.interleave_granularity = p->interleave_granularity; 1791 cxled->cxld.hpa_range = (struct range) { 1792 .start = p->res->start, 1793 .end = p->res->end, 1794 }; 1795 1796 if (p->nr_targets != p->interleave_ways) 1797 return 0; 1798 1799 /* 1800 * Test the auto-discovery position calculator function 1801 * against this successfully created user-defined region. 1802 * A fail message here means that this interleave config 1803 * will fail when presented as CXL_REGION_F_AUTO. 1804 */ 1805 for (int i = 0; i < p->nr_targets; i++) { 1806 struct cxl_endpoint_decoder *cxled = p->targets[i]; 1807 int test_pos; 1808 1809 test_pos = cxl_calc_interleave_pos(cxled); 1810 dev_dbg(&cxled->cxld.dev, 1811 "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n", 1812 (test_pos == cxled->pos) ? "success" : "fail", 1813 test_pos, cxled->pos); 1814 } 1815 1816 return 0; 1817 } 1818 1819 static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) 1820 { 1821 struct cxl_port *iter, *ep_port = cxled_to_port(cxled); 1822 struct cxl_region *cxlr = cxled->cxld.region; 1823 struct cxl_region_params *p; 1824 int rc = 0; 1825 1826 lockdep_assert_held_write(&cxl_region_rwsem); 1827 1828 if (!cxlr) 1829 return 0; 1830 1831 p = &cxlr->params; 1832 get_device(&cxlr->dev); 1833 1834 if (p->state > CXL_CONFIG_ACTIVE) { 1835 /* 1836 * TODO: tear down all impacted regions if a device is 1837 * removed out of order 1838 */ 1839 rc = cxl_region_decode_reset(cxlr, p->interleave_ways); 1840 if (rc) 1841 goto out; 1842 p->state = CXL_CONFIG_ACTIVE; 1843 } 1844 1845 for (iter = ep_port; !is_cxl_root(iter); 1846 iter = to_cxl_port(iter->dev.parent)) 1847 cxl_port_detach_region(iter, cxlr, cxled); 1848 1849 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways || 1850 p->targets[cxled->pos] != cxled) { 1851 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 1852 1853 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n", 1854 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), 1855 cxled->pos); 1856 goto out; 1857 } 1858 1859 if (p->state == CXL_CONFIG_ACTIVE) { 1860 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; 1861 cxl_region_teardown_targets(cxlr); 1862 } 1863 p->targets[cxled->pos] = NULL; 1864 p->nr_targets--; 1865 cxled->cxld.hpa_range = (struct range) { 1866 .start = 0, 1867 .end = -1, 1868 }; 1869 1870 /* notify the region driver that one of its targets has departed */ 1871 up_write(&cxl_region_rwsem); 1872 device_release_driver(&cxlr->dev); 1873 down_write(&cxl_region_rwsem); 1874 out: 1875 put_device(&cxlr->dev); 1876 return rc; 1877 } 1878 1879 void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) 1880 { 1881 down_write(&cxl_region_rwsem); 1882 cxled->mode = CXL_DECODER_DEAD; 1883 cxl_region_detach(cxled); 1884 up_write(&cxl_region_rwsem); 1885 } 1886 1887 static int attach_target(struct cxl_region *cxlr, 1888 struct cxl_endpoint_decoder *cxled, int pos, 1889 unsigned int state) 1890 { 1891 int rc = 0; 1892 1893 if (state == TASK_INTERRUPTIBLE) 1894 rc = down_write_killable(&cxl_region_rwsem); 1895 else 1896 down_write(&cxl_region_rwsem); 1897 if (rc) 1898 return rc; 1899 1900 down_read(&cxl_dpa_rwsem); 1901 rc = cxl_region_attach(cxlr, cxled, pos); 1902 up_read(&cxl_dpa_rwsem); 1903 up_write(&cxl_region_rwsem); 1904 return rc; 1905 } 1906 1907 static int detach_target(struct cxl_region *cxlr, int pos) 1908 { 1909 struct cxl_region_params *p = &cxlr->params; 1910 int rc; 1911 1912 rc = down_write_killable(&cxl_region_rwsem); 1913 if (rc) 1914 return rc; 1915 1916 if (pos >= p->interleave_ways) { 1917 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, 1918 p->interleave_ways); 1919 rc = -ENXIO; 1920 goto out; 1921 } 1922 1923 if (!p->targets[pos]) { 1924 rc = 0; 1925 goto out; 1926 } 1927 1928 rc = cxl_region_detach(p->targets[pos]); 1929 out: 1930 up_write(&cxl_region_rwsem); 1931 return rc; 1932 } 1933 1934 static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos, 1935 size_t len) 1936 { 1937 int rc; 1938 1939 if (sysfs_streq(buf, "\n")) 1940 rc = detach_target(cxlr, pos); 1941 else { 1942 struct device *dev; 1943 1944 dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf); 1945 if (!dev) 1946 return -ENODEV; 1947 1948 if (!is_endpoint_decoder(dev)) { 1949 rc = -EINVAL; 1950 goto out; 1951 } 1952 1953 rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos, 1954 TASK_INTERRUPTIBLE); 1955 out: 1956 put_device(dev); 1957 } 1958 1959 if (rc < 0) 1960 return rc; 1961 return len; 1962 } 1963 1964 #define TARGET_ATTR_RW(n) \ 1965 static ssize_t target##n##_show( \ 1966 struct device *dev, struct device_attribute *attr, char *buf) \ 1967 { \ 1968 return show_targetN(to_cxl_region(dev), buf, (n)); \ 1969 } \ 1970 static ssize_t target##n##_store(struct device *dev, \ 1971 struct device_attribute *attr, \ 1972 const char *buf, size_t len) \ 1973 { \ 1974 return store_targetN(to_cxl_region(dev), buf, (n), len); \ 1975 } \ 1976 static DEVICE_ATTR_RW(target##n) 1977 1978 TARGET_ATTR_RW(0); 1979 TARGET_ATTR_RW(1); 1980 TARGET_ATTR_RW(2); 1981 TARGET_ATTR_RW(3); 1982 TARGET_ATTR_RW(4); 1983 TARGET_ATTR_RW(5); 1984 TARGET_ATTR_RW(6); 1985 TARGET_ATTR_RW(7); 1986 TARGET_ATTR_RW(8); 1987 TARGET_ATTR_RW(9); 1988 TARGET_ATTR_RW(10); 1989 TARGET_ATTR_RW(11); 1990 TARGET_ATTR_RW(12); 1991 TARGET_ATTR_RW(13); 1992 TARGET_ATTR_RW(14); 1993 TARGET_ATTR_RW(15); 1994 1995 static struct attribute *target_attrs[] = { 1996 &dev_attr_target0.attr, 1997 &dev_attr_target1.attr, 1998 &dev_attr_target2.attr, 1999 &dev_attr_target3.attr, 2000 &dev_attr_target4.attr, 2001 &dev_attr_target5.attr, 2002 &dev_attr_target6.attr, 2003 &dev_attr_target7.attr, 2004 &dev_attr_target8.attr, 2005 &dev_attr_target9.attr, 2006 &dev_attr_target10.attr, 2007 &dev_attr_target11.attr, 2008 &dev_attr_target12.attr, 2009 &dev_attr_target13.attr, 2010 &dev_attr_target14.attr, 2011 &dev_attr_target15.attr, 2012 NULL, 2013 }; 2014 2015 static umode_t cxl_region_target_visible(struct kobject *kobj, 2016 struct attribute *a, int n) 2017 { 2018 struct device *dev = kobj_to_dev(kobj); 2019 struct cxl_region *cxlr = to_cxl_region(dev); 2020 struct cxl_region_params *p = &cxlr->params; 2021 2022 if (n < p->interleave_ways) 2023 return a->mode; 2024 return 0; 2025 } 2026 2027 static const struct attribute_group cxl_region_target_group = { 2028 .attrs = target_attrs, 2029 .is_visible = cxl_region_target_visible, 2030 }; 2031 2032 static const struct attribute_group *get_cxl_region_target_group(void) 2033 { 2034 return &cxl_region_target_group; 2035 } 2036 2037 static const struct attribute_group *region_groups[] = { 2038 &cxl_base_attribute_group, 2039 &cxl_region_group, 2040 &cxl_region_target_group, 2041 NULL, 2042 }; 2043 2044 static void cxl_region_release(struct device *dev) 2045 { 2046 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); 2047 struct cxl_region *cxlr = to_cxl_region(dev); 2048 int id = atomic_read(&cxlrd->region_id); 2049 2050 /* 2051 * Try to reuse the recently idled id rather than the cached 2052 * next id to prevent the region id space from increasing 2053 * unnecessarily. 2054 */ 2055 if (cxlr->id < id) 2056 if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) { 2057 memregion_free(id); 2058 goto out; 2059 } 2060 2061 memregion_free(cxlr->id); 2062 out: 2063 put_device(dev->parent); 2064 kfree(cxlr); 2065 } 2066 2067 const struct device_type cxl_region_type = { 2068 .name = "cxl_region", 2069 .release = cxl_region_release, 2070 .groups = region_groups 2071 }; 2072 2073 bool is_cxl_region(struct device *dev) 2074 { 2075 return dev->type == &cxl_region_type; 2076 } 2077 EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL); 2078 2079 static struct cxl_region *to_cxl_region(struct device *dev) 2080 { 2081 if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type, 2082 "not a cxl_region device\n")) 2083 return NULL; 2084 2085 return container_of(dev, struct cxl_region, dev); 2086 } 2087 2088 static void unregister_region(void *dev) 2089 { 2090 struct cxl_region *cxlr = to_cxl_region(dev); 2091 struct cxl_region_params *p = &cxlr->params; 2092 int i; 2093 2094 device_del(dev); 2095 2096 /* 2097 * Now that region sysfs is shutdown, the parameter block is now 2098 * read-only, so no need to hold the region rwsem to access the 2099 * region parameters. 2100 */ 2101 for (i = 0; i < p->interleave_ways; i++) 2102 detach_target(cxlr, i); 2103 2104 cxl_region_iomem_release(cxlr); 2105 put_device(dev); 2106 } 2107 2108 static struct lock_class_key cxl_region_key; 2109 2110 static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id) 2111 { 2112 struct cxl_region *cxlr; 2113 struct device *dev; 2114 2115 cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL); 2116 if (!cxlr) { 2117 memregion_free(id); 2118 return ERR_PTR(-ENOMEM); 2119 } 2120 2121 dev = &cxlr->dev; 2122 device_initialize(dev); 2123 lockdep_set_class(&dev->mutex, &cxl_region_key); 2124 dev->parent = &cxlrd->cxlsd.cxld.dev; 2125 /* 2126 * Keep root decoder pinned through cxl_region_release to fixup 2127 * region id allocations 2128 */ 2129 get_device(dev->parent); 2130 device_set_pm_not_required(dev); 2131 dev->bus = &cxl_bus_type; 2132 dev->type = &cxl_region_type; 2133 cxlr->id = id; 2134 2135 return cxlr; 2136 } 2137 2138 /** 2139 * devm_cxl_add_region - Adds a region to a decoder 2140 * @cxlrd: root decoder 2141 * @id: memregion id to create, or memregion_free() on failure 2142 * @mode: mode for the endpoint decoders of this region 2143 * @type: select whether this is an expander or accelerator (type-2 or type-3) 2144 * 2145 * This is the second step of region initialization. Regions exist within an 2146 * address space which is mapped by a @cxlrd. 2147 * 2148 * Return: 0 if the region was added to the @cxlrd, else returns negative error 2149 * code. The region will be named "regionZ" where Z is the unique region number. 2150 */ 2151 static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, 2152 int id, 2153 enum cxl_decoder_mode mode, 2154 enum cxl_decoder_type type) 2155 { 2156 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); 2157 struct cxl_region *cxlr; 2158 struct device *dev; 2159 int rc; 2160 2161 switch (mode) { 2162 case CXL_DECODER_RAM: 2163 case CXL_DECODER_PMEM: 2164 break; 2165 default: 2166 dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); 2167 return ERR_PTR(-EINVAL); 2168 } 2169 2170 cxlr = cxl_region_alloc(cxlrd, id); 2171 if (IS_ERR(cxlr)) 2172 return cxlr; 2173 cxlr->mode = mode; 2174 cxlr->type = type; 2175 2176 dev = &cxlr->dev; 2177 rc = dev_set_name(dev, "region%d", id); 2178 if (rc) 2179 goto err; 2180 2181 rc = device_add(dev); 2182 if (rc) 2183 goto err; 2184 2185 rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr); 2186 if (rc) 2187 return ERR_PTR(rc); 2188 2189 dev_dbg(port->uport_dev, "%s: created %s\n", 2190 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev)); 2191 return cxlr; 2192 2193 err: 2194 put_device(dev); 2195 return ERR_PTR(rc); 2196 } 2197 2198 static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf) 2199 { 2200 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id)); 2201 } 2202 2203 static ssize_t create_pmem_region_show(struct device *dev, 2204 struct device_attribute *attr, char *buf) 2205 { 2206 return __create_region_show(to_cxl_root_decoder(dev), buf); 2207 } 2208 2209 static ssize_t create_ram_region_show(struct device *dev, 2210 struct device_attribute *attr, char *buf) 2211 { 2212 return __create_region_show(to_cxl_root_decoder(dev), buf); 2213 } 2214 2215 static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd, 2216 enum cxl_decoder_mode mode, int id) 2217 { 2218 int rc; 2219 2220 rc = memregion_alloc(GFP_KERNEL); 2221 if (rc < 0) 2222 return ERR_PTR(rc); 2223 2224 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) { 2225 memregion_free(rc); 2226 return ERR_PTR(-EBUSY); 2227 } 2228 2229 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); 2230 } 2231 2232 static ssize_t create_pmem_region_store(struct device *dev, 2233 struct device_attribute *attr, 2234 const char *buf, size_t len) 2235 { 2236 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2237 struct cxl_region *cxlr; 2238 int rc, id; 2239 2240 rc = sscanf(buf, "region%d\n", &id); 2241 if (rc != 1) 2242 return -EINVAL; 2243 2244 cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id); 2245 if (IS_ERR(cxlr)) 2246 return PTR_ERR(cxlr); 2247 2248 return len; 2249 } 2250 DEVICE_ATTR_RW(create_pmem_region); 2251 2252 static ssize_t create_ram_region_store(struct device *dev, 2253 struct device_attribute *attr, 2254 const char *buf, size_t len) 2255 { 2256 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2257 struct cxl_region *cxlr; 2258 int rc, id; 2259 2260 rc = sscanf(buf, "region%d\n", &id); 2261 if (rc != 1) 2262 return -EINVAL; 2263 2264 cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id); 2265 if (IS_ERR(cxlr)) 2266 return PTR_ERR(cxlr); 2267 2268 return len; 2269 } 2270 DEVICE_ATTR_RW(create_ram_region); 2271 2272 static ssize_t region_show(struct device *dev, struct device_attribute *attr, 2273 char *buf) 2274 { 2275 struct cxl_decoder *cxld = to_cxl_decoder(dev); 2276 ssize_t rc; 2277 2278 rc = down_read_interruptible(&cxl_region_rwsem); 2279 if (rc) 2280 return rc; 2281 2282 if (cxld->region) 2283 rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev)); 2284 else 2285 rc = sysfs_emit(buf, "\n"); 2286 up_read(&cxl_region_rwsem); 2287 2288 return rc; 2289 } 2290 DEVICE_ATTR_RO(region); 2291 2292 static struct cxl_region * 2293 cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name) 2294 { 2295 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; 2296 struct device *region_dev; 2297 2298 region_dev = device_find_child_by_name(&cxld->dev, name); 2299 if (!region_dev) 2300 return ERR_PTR(-ENODEV); 2301 2302 return to_cxl_region(region_dev); 2303 } 2304 2305 static ssize_t delete_region_store(struct device *dev, 2306 struct device_attribute *attr, 2307 const char *buf, size_t len) 2308 { 2309 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2310 struct cxl_port *port = to_cxl_port(dev->parent); 2311 struct cxl_region *cxlr; 2312 2313 cxlr = cxl_find_region_by_name(cxlrd, buf); 2314 if (IS_ERR(cxlr)) 2315 return PTR_ERR(cxlr); 2316 2317 devm_release_action(port->uport_dev, unregister_region, cxlr); 2318 put_device(&cxlr->dev); 2319 2320 return len; 2321 } 2322 DEVICE_ATTR_WO(delete_region); 2323 2324 static void cxl_pmem_region_release(struct device *dev) 2325 { 2326 struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev); 2327 int i; 2328 2329 for (i = 0; i < cxlr_pmem->nr_mappings; i++) { 2330 struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd; 2331 2332 put_device(&cxlmd->dev); 2333 } 2334 2335 kfree(cxlr_pmem); 2336 } 2337 2338 static const struct attribute_group *cxl_pmem_region_attribute_groups[] = { 2339 &cxl_base_attribute_group, 2340 NULL, 2341 }; 2342 2343 const struct device_type cxl_pmem_region_type = { 2344 .name = "cxl_pmem_region", 2345 .release = cxl_pmem_region_release, 2346 .groups = cxl_pmem_region_attribute_groups, 2347 }; 2348 2349 bool is_cxl_pmem_region(struct device *dev) 2350 { 2351 return dev->type == &cxl_pmem_region_type; 2352 } 2353 EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL); 2354 2355 struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev) 2356 { 2357 if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev), 2358 "not a cxl_pmem_region device\n")) 2359 return NULL; 2360 return container_of(dev, struct cxl_pmem_region, dev); 2361 } 2362 EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL); 2363 2364 struct cxl_poison_context { 2365 struct cxl_port *port; 2366 enum cxl_decoder_mode mode; 2367 u64 offset; 2368 }; 2369 2370 static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd, 2371 struct cxl_poison_context *ctx) 2372 { 2373 struct cxl_dev_state *cxlds = cxlmd->cxlds; 2374 u64 offset, length; 2375 int rc = 0; 2376 2377 /* 2378 * Collect poison for the remaining unmapped resources 2379 * after poison is collected by committed endpoints. 2380 * 2381 * Knowing that PMEM must always follow RAM, get poison 2382 * for unmapped resources based on the last decoder's mode: 2383 * ram: scan remains of ram range, then any pmem range 2384 * pmem: scan remains of pmem range 2385 */ 2386 2387 if (ctx->mode == CXL_DECODER_RAM) { 2388 offset = ctx->offset; 2389 length = resource_size(&cxlds->ram_res) - offset; 2390 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); 2391 if (rc == -EFAULT) 2392 rc = 0; 2393 if (rc) 2394 return rc; 2395 } 2396 if (ctx->mode == CXL_DECODER_PMEM) { 2397 offset = ctx->offset; 2398 length = resource_size(&cxlds->dpa_res) - offset; 2399 if (!length) 2400 return 0; 2401 } else if (resource_size(&cxlds->pmem_res)) { 2402 offset = cxlds->pmem_res.start; 2403 length = resource_size(&cxlds->pmem_res); 2404 } else { 2405 return 0; 2406 } 2407 2408 return cxl_mem_get_poison(cxlmd, offset, length, NULL); 2409 } 2410 2411 static int poison_by_decoder(struct device *dev, void *arg) 2412 { 2413 struct cxl_poison_context *ctx = arg; 2414 struct cxl_endpoint_decoder *cxled; 2415 struct cxl_memdev *cxlmd; 2416 u64 offset, length; 2417 int rc = 0; 2418 2419 if (!is_endpoint_decoder(dev)) 2420 return rc; 2421 2422 cxled = to_cxl_endpoint_decoder(dev); 2423 if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) 2424 return rc; 2425 2426 /* 2427 * Regions are only created with single mode decoders: pmem or ram. 2428 * Linux does not support mixed mode decoders. This means that 2429 * reading poison per endpoint decoder adheres to the requirement 2430 * that poison reads of pmem and ram must be separated. 2431 * CXL 3.0 Spec 8.2.9.8.4.1 2432 */ 2433 if (cxled->mode == CXL_DECODER_MIXED) { 2434 dev_dbg(dev, "poison list read unsupported in mixed mode\n"); 2435 return rc; 2436 } 2437 2438 cxlmd = cxled_to_memdev(cxled); 2439 if (cxled->skip) { 2440 offset = cxled->dpa_res->start - cxled->skip; 2441 length = cxled->skip; 2442 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); 2443 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) 2444 rc = 0; 2445 if (rc) 2446 return rc; 2447 } 2448 2449 offset = cxled->dpa_res->start; 2450 length = cxled->dpa_res->end - offset + 1; 2451 rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region); 2452 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) 2453 rc = 0; 2454 if (rc) 2455 return rc; 2456 2457 /* Iterate until commit_end is reached */ 2458 if (cxled->cxld.id == ctx->port->commit_end) { 2459 ctx->offset = cxled->dpa_res->end + 1; 2460 ctx->mode = cxled->mode; 2461 return 1; 2462 } 2463 2464 return 0; 2465 } 2466 2467 int cxl_get_poison_by_endpoint(struct cxl_port *port) 2468 { 2469 struct cxl_poison_context ctx; 2470 int rc = 0; 2471 2472 ctx = (struct cxl_poison_context) { 2473 .port = port 2474 }; 2475 2476 rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder); 2477 if (rc == 1) 2478 rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev), 2479 &ctx); 2480 2481 return rc; 2482 } 2483 2484 static struct lock_class_key cxl_pmem_region_key; 2485 2486 static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) 2487 { 2488 struct cxl_region_params *p = &cxlr->params; 2489 struct cxl_nvdimm_bridge *cxl_nvb; 2490 struct cxl_pmem_region *cxlr_pmem; 2491 struct device *dev; 2492 int i; 2493 2494 down_read(&cxl_region_rwsem); 2495 if (p->state != CXL_CONFIG_COMMIT) { 2496 cxlr_pmem = ERR_PTR(-ENXIO); 2497 goto out; 2498 } 2499 2500 cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), 2501 GFP_KERNEL); 2502 if (!cxlr_pmem) { 2503 cxlr_pmem = ERR_PTR(-ENOMEM); 2504 goto out; 2505 } 2506 2507 cxlr_pmem->hpa_range.start = p->res->start; 2508 cxlr_pmem->hpa_range.end = p->res->end; 2509 2510 /* Snapshot the region configuration underneath the cxl_region_rwsem */ 2511 cxlr_pmem->nr_mappings = p->nr_targets; 2512 for (i = 0; i < p->nr_targets; i++) { 2513 struct cxl_endpoint_decoder *cxled = p->targets[i]; 2514 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 2515 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; 2516 2517 /* 2518 * Regions never span CXL root devices, so by definition the 2519 * bridge for one device is the same for all. 2520 */ 2521 if (i == 0) { 2522 cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); 2523 if (!cxl_nvb) { 2524 cxlr_pmem = ERR_PTR(-ENODEV); 2525 goto out; 2526 } 2527 cxlr->cxl_nvb = cxl_nvb; 2528 } 2529 m->cxlmd = cxlmd; 2530 get_device(&cxlmd->dev); 2531 m->start = cxled->dpa_res->start; 2532 m->size = resource_size(cxled->dpa_res); 2533 m->position = i; 2534 } 2535 2536 dev = &cxlr_pmem->dev; 2537 cxlr_pmem->cxlr = cxlr; 2538 cxlr->cxlr_pmem = cxlr_pmem; 2539 device_initialize(dev); 2540 lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); 2541 device_set_pm_not_required(dev); 2542 dev->parent = &cxlr->dev; 2543 dev->bus = &cxl_bus_type; 2544 dev->type = &cxl_pmem_region_type; 2545 out: 2546 up_read(&cxl_region_rwsem); 2547 2548 return cxlr_pmem; 2549 } 2550 2551 static void cxl_dax_region_release(struct device *dev) 2552 { 2553 struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev); 2554 2555 kfree(cxlr_dax); 2556 } 2557 2558 static const struct attribute_group *cxl_dax_region_attribute_groups[] = { 2559 &cxl_base_attribute_group, 2560 NULL, 2561 }; 2562 2563 const struct device_type cxl_dax_region_type = { 2564 .name = "cxl_dax_region", 2565 .release = cxl_dax_region_release, 2566 .groups = cxl_dax_region_attribute_groups, 2567 }; 2568 2569 static bool is_cxl_dax_region(struct device *dev) 2570 { 2571 return dev->type == &cxl_dax_region_type; 2572 } 2573 2574 struct cxl_dax_region *to_cxl_dax_region(struct device *dev) 2575 { 2576 if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev), 2577 "not a cxl_dax_region device\n")) 2578 return NULL; 2579 return container_of(dev, struct cxl_dax_region, dev); 2580 } 2581 EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL); 2582 2583 static struct lock_class_key cxl_dax_region_key; 2584 2585 static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr) 2586 { 2587 struct cxl_region_params *p = &cxlr->params; 2588 struct cxl_dax_region *cxlr_dax; 2589 struct device *dev; 2590 2591 down_read(&cxl_region_rwsem); 2592 if (p->state != CXL_CONFIG_COMMIT) { 2593 cxlr_dax = ERR_PTR(-ENXIO); 2594 goto out; 2595 } 2596 2597 cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL); 2598 if (!cxlr_dax) { 2599 cxlr_dax = ERR_PTR(-ENOMEM); 2600 goto out; 2601 } 2602 2603 cxlr_dax->hpa_range.start = p->res->start; 2604 cxlr_dax->hpa_range.end = p->res->end; 2605 2606 dev = &cxlr_dax->dev; 2607 cxlr_dax->cxlr = cxlr; 2608 device_initialize(dev); 2609 lockdep_set_class(&dev->mutex, &cxl_dax_region_key); 2610 device_set_pm_not_required(dev); 2611 dev->parent = &cxlr->dev; 2612 dev->bus = &cxl_bus_type; 2613 dev->type = &cxl_dax_region_type; 2614 out: 2615 up_read(&cxl_region_rwsem); 2616 2617 return cxlr_dax; 2618 } 2619 2620 static void cxlr_pmem_unregister(void *_cxlr_pmem) 2621 { 2622 struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem; 2623 struct cxl_region *cxlr = cxlr_pmem->cxlr; 2624 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; 2625 2626 /* 2627 * Either the bridge is in ->remove() context under the device_lock(), 2628 * or cxlr_release_nvdimm() is cancelling the bridge's release action 2629 * for @cxlr_pmem and doing it itself (while manually holding the bridge 2630 * lock). 2631 */ 2632 device_lock_assert(&cxl_nvb->dev); 2633 cxlr->cxlr_pmem = NULL; 2634 cxlr_pmem->cxlr = NULL; 2635 device_unregister(&cxlr_pmem->dev); 2636 } 2637 2638 static void cxlr_release_nvdimm(void *_cxlr) 2639 { 2640 struct cxl_region *cxlr = _cxlr; 2641 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; 2642 2643 device_lock(&cxl_nvb->dev); 2644 if (cxlr->cxlr_pmem) 2645 devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, 2646 cxlr->cxlr_pmem); 2647 device_unlock(&cxl_nvb->dev); 2648 cxlr->cxl_nvb = NULL; 2649 put_device(&cxl_nvb->dev); 2650 } 2651 2652 /** 2653 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge 2654 * @cxlr: parent CXL region for this pmem region bridge device 2655 * 2656 * Return: 0 on success negative error code on failure. 2657 */ 2658 static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) 2659 { 2660 struct cxl_pmem_region *cxlr_pmem; 2661 struct cxl_nvdimm_bridge *cxl_nvb; 2662 struct device *dev; 2663 int rc; 2664 2665 cxlr_pmem = cxl_pmem_region_alloc(cxlr); 2666 if (IS_ERR(cxlr_pmem)) 2667 return PTR_ERR(cxlr_pmem); 2668 cxl_nvb = cxlr->cxl_nvb; 2669 2670 dev = &cxlr_pmem->dev; 2671 rc = dev_set_name(dev, "pmem_region%d", cxlr->id); 2672 if (rc) 2673 goto err; 2674 2675 rc = device_add(dev); 2676 if (rc) 2677 goto err; 2678 2679 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), 2680 dev_name(dev)); 2681 2682 device_lock(&cxl_nvb->dev); 2683 if (cxl_nvb->dev.driver) 2684 rc = devm_add_action_or_reset(&cxl_nvb->dev, 2685 cxlr_pmem_unregister, cxlr_pmem); 2686 else 2687 rc = -ENXIO; 2688 device_unlock(&cxl_nvb->dev); 2689 2690 if (rc) 2691 goto err_bridge; 2692 2693 /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */ 2694 return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr); 2695 2696 err: 2697 put_device(dev); 2698 err_bridge: 2699 put_device(&cxl_nvb->dev); 2700 cxlr->cxl_nvb = NULL; 2701 return rc; 2702 } 2703 2704 static void cxlr_dax_unregister(void *_cxlr_dax) 2705 { 2706 struct cxl_dax_region *cxlr_dax = _cxlr_dax; 2707 2708 device_unregister(&cxlr_dax->dev); 2709 } 2710 2711 static int devm_cxl_add_dax_region(struct cxl_region *cxlr) 2712 { 2713 struct cxl_dax_region *cxlr_dax; 2714 struct device *dev; 2715 int rc; 2716 2717 cxlr_dax = cxl_dax_region_alloc(cxlr); 2718 if (IS_ERR(cxlr_dax)) 2719 return PTR_ERR(cxlr_dax); 2720 2721 dev = &cxlr_dax->dev; 2722 rc = dev_set_name(dev, "dax_region%d", cxlr->id); 2723 if (rc) 2724 goto err; 2725 2726 rc = device_add(dev); 2727 if (rc) 2728 goto err; 2729 2730 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), 2731 dev_name(dev)); 2732 2733 return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister, 2734 cxlr_dax); 2735 err: 2736 put_device(dev); 2737 return rc; 2738 } 2739 2740 static int match_root_decoder_by_range(struct device *dev, void *data) 2741 { 2742 struct range *r1, *r2 = data; 2743 struct cxl_root_decoder *cxlrd; 2744 2745 if (!is_root_decoder(dev)) 2746 return 0; 2747 2748 cxlrd = to_cxl_root_decoder(dev); 2749 r1 = &cxlrd->cxlsd.cxld.hpa_range; 2750 return range_contains(r1, r2); 2751 } 2752 2753 static int match_region_by_range(struct device *dev, void *data) 2754 { 2755 struct cxl_region_params *p; 2756 struct cxl_region *cxlr; 2757 struct range *r = data; 2758 int rc = 0; 2759 2760 if (!is_cxl_region(dev)) 2761 return 0; 2762 2763 cxlr = to_cxl_region(dev); 2764 p = &cxlr->params; 2765 2766 down_read(&cxl_region_rwsem); 2767 if (p->res && p->res->start == r->start && p->res->end == r->end) 2768 rc = 1; 2769 up_read(&cxl_region_rwsem); 2770 2771 return rc; 2772 } 2773 2774 /* Establish an empty region covering the given HPA range */ 2775 static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd, 2776 struct cxl_endpoint_decoder *cxled) 2777 { 2778 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 2779 struct cxl_port *port = cxlrd_to_port(cxlrd); 2780 struct range *hpa = &cxled->cxld.hpa_range; 2781 struct cxl_region_params *p; 2782 struct cxl_region *cxlr; 2783 struct resource *res; 2784 int rc; 2785 2786 do { 2787 cxlr = __create_region(cxlrd, cxled->mode, 2788 atomic_read(&cxlrd->region_id)); 2789 } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY); 2790 2791 if (IS_ERR(cxlr)) { 2792 dev_err(cxlmd->dev.parent, 2793 "%s:%s: %s failed assign region: %ld\n", 2794 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), 2795 __func__, PTR_ERR(cxlr)); 2796 return cxlr; 2797 } 2798 2799 down_write(&cxl_region_rwsem); 2800 p = &cxlr->params; 2801 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { 2802 dev_err(cxlmd->dev.parent, 2803 "%s:%s: %s autodiscovery interrupted\n", 2804 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), 2805 __func__); 2806 rc = -EBUSY; 2807 goto err; 2808 } 2809 2810 set_bit(CXL_REGION_F_AUTO, &cxlr->flags); 2811 2812 res = kmalloc(sizeof(*res), GFP_KERNEL); 2813 if (!res) { 2814 rc = -ENOMEM; 2815 goto err; 2816 } 2817 2818 *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa), 2819 dev_name(&cxlr->dev)); 2820 rc = insert_resource(cxlrd->res, res); 2821 if (rc) { 2822 /* 2823 * Platform-firmware may not have split resources like "System 2824 * RAM" on CXL window boundaries see cxl_region_iomem_release() 2825 */ 2826 dev_warn(cxlmd->dev.parent, 2827 "%s:%s: %s %s cannot insert resource\n", 2828 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), 2829 __func__, dev_name(&cxlr->dev)); 2830 } 2831 2832 p->res = res; 2833 p->interleave_ways = cxled->cxld.interleave_ways; 2834 p->interleave_granularity = cxled->cxld.interleave_granularity; 2835 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; 2836 2837 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); 2838 if (rc) 2839 goto err; 2840 2841 dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n", 2842 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__, 2843 dev_name(&cxlr->dev), p->res, p->interleave_ways, 2844 p->interleave_granularity); 2845 2846 /* ...to match put_device() in cxl_add_to_region() */ 2847 get_device(&cxlr->dev); 2848 up_write(&cxl_region_rwsem); 2849 2850 return cxlr; 2851 2852 err: 2853 up_write(&cxl_region_rwsem); 2854 devm_release_action(port->uport_dev, unregister_region, cxlr); 2855 return ERR_PTR(rc); 2856 } 2857 2858 int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled) 2859 { 2860 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 2861 struct range *hpa = &cxled->cxld.hpa_range; 2862 struct cxl_decoder *cxld = &cxled->cxld; 2863 struct device *cxlrd_dev, *region_dev; 2864 struct cxl_root_decoder *cxlrd; 2865 struct cxl_region_params *p; 2866 struct cxl_region *cxlr; 2867 bool attach = false; 2868 int rc; 2869 2870 cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range, 2871 match_root_decoder_by_range); 2872 if (!cxlrd_dev) { 2873 dev_err(cxlmd->dev.parent, 2874 "%s:%s no CXL window for range %#llx:%#llx\n", 2875 dev_name(&cxlmd->dev), dev_name(&cxld->dev), 2876 cxld->hpa_range.start, cxld->hpa_range.end); 2877 return -ENXIO; 2878 } 2879 2880 cxlrd = to_cxl_root_decoder(cxlrd_dev); 2881 2882 /* 2883 * Ensure that if multiple threads race to construct_region() for @hpa 2884 * one does the construction and the others add to that. 2885 */ 2886 mutex_lock(&cxlrd->range_lock); 2887 region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa, 2888 match_region_by_range); 2889 if (!region_dev) { 2890 cxlr = construct_region(cxlrd, cxled); 2891 region_dev = &cxlr->dev; 2892 } else 2893 cxlr = to_cxl_region(region_dev); 2894 mutex_unlock(&cxlrd->range_lock); 2895 2896 rc = PTR_ERR_OR_ZERO(cxlr); 2897 if (rc) 2898 goto out; 2899 2900 attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE); 2901 2902 down_read(&cxl_region_rwsem); 2903 p = &cxlr->params; 2904 attach = p->state == CXL_CONFIG_COMMIT; 2905 up_read(&cxl_region_rwsem); 2906 2907 if (attach) { 2908 /* 2909 * If device_attach() fails the range may still be active via 2910 * the platform-firmware memory map, otherwise the driver for 2911 * regions is local to this file, so driver matching can't fail. 2912 */ 2913 if (device_attach(&cxlr->dev) < 0) 2914 dev_err(&cxlr->dev, "failed to enable, range: %pr\n", 2915 p->res); 2916 } 2917 2918 put_device(region_dev); 2919 out: 2920 put_device(cxlrd_dev); 2921 return rc; 2922 } 2923 EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL); 2924 2925 static int is_system_ram(struct resource *res, void *arg) 2926 { 2927 struct cxl_region *cxlr = arg; 2928 struct cxl_region_params *p = &cxlr->params; 2929 2930 dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res); 2931 return 1; 2932 } 2933 2934 static int cxl_region_probe(struct device *dev) 2935 { 2936 struct cxl_region *cxlr = to_cxl_region(dev); 2937 struct cxl_region_params *p = &cxlr->params; 2938 int rc; 2939 2940 rc = down_read_interruptible(&cxl_region_rwsem); 2941 if (rc) { 2942 dev_dbg(&cxlr->dev, "probe interrupted\n"); 2943 return rc; 2944 } 2945 2946 if (p->state < CXL_CONFIG_COMMIT) { 2947 dev_dbg(&cxlr->dev, "config state: %d\n", p->state); 2948 rc = -ENXIO; 2949 goto out; 2950 } 2951 2952 if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) { 2953 dev_err(&cxlr->dev, 2954 "failed to activate, re-commit region and retry\n"); 2955 rc = -ENXIO; 2956 goto out; 2957 } 2958 2959 /* 2960 * From this point on any path that changes the region's state away from 2961 * CXL_CONFIG_COMMIT is also responsible for releasing the driver. 2962 */ 2963 out: 2964 up_read(&cxl_region_rwsem); 2965 2966 if (rc) 2967 return rc; 2968 2969 switch (cxlr->mode) { 2970 case CXL_DECODER_PMEM: 2971 return devm_cxl_add_pmem_region(cxlr); 2972 case CXL_DECODER_RAM: 2973 /* 2974 * The region can not be manged by CXL if any portion of 2975 * it is already online as 'System RAM' 2976 */ 2977 if (walk_iomem_res_desc(IORES_DESC_NONE, 2978 IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY, 2979 p->res->start, p->res->end, cxlr, 2980 is_system_ram) > 0) 2981 return 0; 2982 return devm_cxl_add_dax_region(cxlr); 2983 default: 2984 dev_dbg(&cxlr->dev, "unsupported region mode: %d\n", 2985 cxlr->mode); 2986 return -ENXIO; 2987 } 2988 } 2989 2990 static struct cxl_driver cxl_region_driver = { 2991 .name = "cxl_region", 2992 .probe = cxl_region_probe, 2993 .id = CXL_DEVICE_REGION, 2994 }; 2995 2996 int cxl_region_init(void) 2997 { 2998 return cxl_driver_register(&cxl_region_driver); 2999 } 3000 3001 void cxl_region_exit(void) 3002 { 3003 cxl_driver_unregister(&cxl_region_driver); 3004 } 3005 3006 MODULE_IMPORT_NS(CXL); 3007 MODULE_IMPORT_NS(DEVMEM); 3008 MODULE_ALIAS_CXL(CXL_DEVICE_REGION); 3009