// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the active mapped capacity by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave ways
 * 3. Size
 * 4. Decoder targets
 */
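
/*
 * Example: a sysfs sequence that satisfies the ordering constraints above
 * (decoder and region names are illustrative):
 *
 *	# echo region0 > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *	# uuidgen > /sys/bus/cxl/devices/region0/uuid
 *	# echo 256 > /sys/bus/cxl/devices/region0/interleave_granularity
 *	# echo 2 > /sys/bus/cxl/devices/region0/interleave_ways
 *	# echo $((512 << 20)) > /sys/bus/cxl/devices/region0/size
 *	# echo decoder3.0 > /sys/bus/cxl/devices/region0/target0
 *	# echo decoder4.0 > /sys/bus/cxl/devices/region0/target1
 *	# echo 1 > /sys/bus/cxl/devices/region0/commit
 */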

static struct cxl_region *to_cxl_region(struct device *dev);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (cxlr->mode != CXL_DECODER_PMEM)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}

static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}

static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (uuid_equal(&p->uuid, &temp))
		goto out;

	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);

static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
					  struct cxl_region *cxlr)
{
	return xa_load(&port->regions, (unsigned long)cxlr);
}

static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
	if (!cpu_cache_has_invalidate_memregion()) {
		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
			dev_warn_once(
				&cxlr->dev,
				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
			return 0;
		} else {
			dev_err(&cxlr->dev,
				"Failed to synchronize CPU cache state\n");
			return -ENXIO;
		}
	}

	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
	return 0;
}

static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	/*
	 * Before region teardown attempt to flush, and if the flush
	 * fails cancel the region teardown for data consistency
	 * concerns
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		return rc;

	for (i = count - 1; i >= 0; i--) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_port *iter = cxled_to_port(cxled);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct cxl_ep *ep;

		if (cxlds->rcd)
			goto endpoint_reset;

		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			struct cxl_region_ref *cxl_rr;
			struct cxl_decoder *cxld;

			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			if (cxld->reset)
				rc = cxld->reset(cxld);
			if (rc)
				return rc;
			set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
		}

endpoint_reset:
		rc = cxled->cxld.reset(&cxled->cxld);
		if (rc)
			return rc;
		set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
	}

	/* all decoders associated with this region have been torn down */
	clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);

	return 0;
}

static int commit_decoder(struct cxl_decoder *cxld)
{
	struct cxl_switch_decoder *cxlsd = NULL;

	if (cxld->commit)
		return cxld->commit(cxld);

	if (is_switch_decoder(&cxld->dev))
		cxlsd = to_cxl_switch_decoder(&cxld->dev);

	if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
			  "->commit() is required\n"))
		return -ENXIO;
	return 0;
}

static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_region_ref *cxl_rr;
		struct cxl_decoder *cxld;
		struct cxl_port *iter;
		struct cxl_ep *ep;

		/* commit bottom up */
		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
		     iter = to_cxl_port(iter->dev.parent)) {
			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = commit_decoder(cxld);
			if (rc)
				break;
		}

		if (rc) {
			/* programming @iter failed, teardown */
			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
				cxl_rr = cxl_rr_load(iter, cxlr);
				cxld = cxl_rr->decoder;
				if (cxld->reset)
					cxld->reset(cxld);
			}

			cxled->cxld.reset(&cxled->cxld);
			goto err;
		}
	}

	return 0;

err:
	/* undo the targets that were successfully committed */
	cxl_region_decode_reset(cxlr, i);
	return rc;
}
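
/*
 * Region configuration state, as driven by the attributes below, moves
 * through: CXL_CONFIG_IDLE -> CXL_CONFIG_INTERLEAVE_ACTIVE (interleave
 * settled, HPA allocated) -> CXL_CONFIG_ACTIVE (all targets attached) ->
 * CXL_CONFIG_COMMIT (decoders programmed), with CXL_CONFIG_RESET_PENDING
 * as a transient state while an uncommit is in flight.
 */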

static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	bool commit;
	ssize_t rc;

	rc = kstrtobool(buf, &commit);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* Already in the requested state? */
	if (commit && p->state >= CXL_CONFIG_COMMIT)
		goto out;
	if (!commit && p->state < CXL_CONFIG_COMMIT)
		goto out;

	/* Not ready to commit? */
	if (commit && p->state < CXL_CONFIG_ACTIVE) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Invalidate caches before region setup to drop any speculative
	 * consumption of this address space
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		goto out;

	if (commit) {
		rc = cxl_region_decode_commit(cxlr);
		if (rc == 0)
			p->state = CXL_CONFIG_COMMIT;
	} else {
		p->state = CXL_CONFIG_RESET_PENDING;
		up_write(&cxl_region_rwsem);
		device_release_driver(&cxlr->dev);
		down_write(&cxl_region_rwsem);

		/*
		 * The lock was dropped, so need to revalidate that the reset is
		 * still pending.
		 */
		if (p->state == CXL_CONFIG_RESET_PENDING) {
			rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
			/*
			 * Revert to committed since there may still be active
			 * decoders associated with this region, or move forward
			 * to active to mark the reset successful
			 */
			if (rc)
				p->state = CXL_CONFIG_COMMIT;
			else
				p->state = CXL_CONFIG_ACTIVE;
		}
	}

out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}

static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(commit);

static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	/*
	 * Support tooling that expects to find a 'uuid' attribute for all
	 * regions regardless of mode.
	 */
	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0444;
	return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}

static const struct attribute_group *get_cxl_region_target_group(void);
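
/*
 * For reference, ways_to_eiw() / granularity_to_eig() translate between
 * human-readable values and the CXL HDM decoder field encodings, where
 * granularity = 256 << eig, eiw 0-4 encodes the power-of-2 ways 1-16, and
 * eiw 8-10 encodes 3, 6, and 12 way interleaves. For example:
 *
 *	granularity_to_eig(1024, &eig) -> eig = 2
 *	ways_to_eiw(3, &eiw)           -> eiw = 8
 */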

static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	unsigned int val, save;
	int rc;
	u8 iw;

	rc = kstrtouint(buf, 0, &val);
	if (rc)
		return rc;

	rc = ways_to_eiw(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x6, and x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	save = p->interleave_ways;
	p->interleave_ways = val;
	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		p->interleave_ways = save;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);
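
/*
 * Worked example of the check above: with a host bridge interleaved x2,
 * valid region ways are x2, x4, x8, ... (power-of-2 multiples of the
 * root's ways), and interleave_granularity_store() below additionally
 * requires the region granularity to match the root granularity exactly
 * whenever the root is interleaved.
 */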

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}

static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	rc = granularity_to_eig(val, &ig);
	if (rc)
		return rc;

	/*
	 * When the host-bridge is interleaved, disallow region granularity !=
	 * root granularity. Regions with a granularity less than the root
	 * interleave result in needing multiple endpoints to support a single
	 * slot in the interleave (possible to support in the future). Regions
	 * with a granularity greater than the root interleave result in invalid
	 * DPA translations (invalid to support).
	 */
	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);

static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 resource = -1ULL;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		resource = p->res->start;
	rc = sysfs_emit(buf, "%#llx\n", resource);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(resource);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);

static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_region_params *p = &cxlr->params;
	struct resource *res;
	u32 remainder = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	/* Nothing to do... */
	if (p->res && resource_size(p->res) == size)
		return 0;

	/* To change size the old size must be freed first */
	if (p->res)
		return -EBUSY;

	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
		return -EBUSY;

	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
	if (!p->interleave_ways || !p->interleave_granularity ||
	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
		return -ENXIO;

	div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
	if (remainder)
		return -EINVAL;

	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
				    dev_name(&cxlr->dev));
	if (IS_ERR(res)) {
		dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
			PTR_ERR(res));
		return PTR_ERR(res);
	}

	p->res = res;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	return 0;
}
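
/*
 * Sizing note for alloc_hpa(): @size must be a multiple of
 * SZ_256M * interleave_ways, e.g. a 2-way region grows in 512M increments
 * so that each endpoint contributes whole 256M-aligned chunks of device
 * capacity.
 */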

static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	if (device_is_registered(&cxlr->dev))
		lockdep_assert_held_write(&cxl_region_rwsem);
	if (p->res) {
		/*
		 * Autodiscovered regions may not have been able to insert their
		 * resource.
		 */
		if (p->res->parent)
			remove_resource(p->res);
		kfree(p->res);
		p->res = NULL;
	}
}

static int free_hpa(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!p->res)
		return 0;

	if (p->state >= CXL_CONFIG_ACTIVE)
		return -EBUSY;

	cxl_region_iomem_release(cxlr);
	p->state = CXL_CONFIG_IDLE;
	return 0;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	u64 val;
	int rc;

	rc = kstrtou64(buf, 0, &val);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (val)
		rc = alloc_hpa(cxlr, val);
	else
		rc = free_hpa(cxlr);
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;

	return len;
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 size = 0;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		size = resource_size(p->res);
	rc = sysfs_emit(buf, "%#llx\n", size);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(size);

static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_commit.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};

static ssize_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	cxled = p->targets[pos];
	if (!cxled)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
	up_read(&cxl_region_rwsem);

	return rc;
}

static int match_free_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;
	int *id = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	/* enforce ordered allocation */
	if (cxld->id != *id)
		return 0;

	if (!cxld->region)
		return 1;

	(*id)++;

	return 0;
}

static int match_auto_decoder(struct device *dev, void *data)
{
	struct cxl_region_params *p = data;
	struct cxl_decoder *cxld;
	struct range *r;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	r = &cxld->hpa_range;

	if (p->res && p->res->start == r->start && p->res->end == r->end)
		return 1;

	return 0;
}
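
/*
 * Allocate switch decoders in ascending instance order since, per the CXL
 * specification's decoder commit ordering rule, HDM decoders must be
 * committed in instance order. match_free_decoder() implements that
 * policy by only handing out the next free decoder id at a port.
 */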

static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
						   struct cxl_region *cxlr)
{
	struct device *dev;
	int id = 0;

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		dev = device_find_child(&port->dev, &cxlr->params,
					match_auto_decoder);
	else
		dev = device_find_child(&port->dev, &id, match_free_decoder);
	if (!dev)
		return NULL;
	/*
	 * This decoder stays registered as long as the endpoint decoder is
	 * registered, and endpoint decoder unregistration holds the
	 * cxl_region_rwsem over unregister events, so no need to hold on to
	 * this extra reference.
	 */
	put_device(dev);
	return to_cxl_decoder(dev);
}

static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
					       struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_region_ref *cxl_rr, *iter;
	unsigned long index;
	int rc;

	xa_for_each(&port->regions, index, iter) {
		struct cxl_region_params *ip = &iter->region->params;

		if (!ip->res)
			continue;

		if (ip->res->start > p->res->start) {
			dev_dbg(&cxlr->dev,
				"%s: HPA order violation %s:%pr vs %pr\n",
				dev_name(&port->dev),
				dev_name(&iter->region->dev), ip->res, p->res);
			return ERR_PTR(-EBUSY);
		}
	}

	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
	if (!cxl_rr)
		return ERR_PTR(-ENOMEM);
	cxl_rr->port = port;
	cxl_rr->region = cxlr;
	cxl_rr->nr_targets = 1;
	xa_init(&cxl_rr->endpoints);

	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track region reference: %d\n",
			dev_name(&port->dev), rc);
		kfree(cxl_rr);
		return ERR_PTR(rc);
	}

	return cxl_rr;
}

static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
{
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;

	if (!cxld)
		return;

	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
	if (cxld->region == cxlr) {
		cxld->region = NULL;
		put_device(&cxlr->dev);
	}
}

static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;

	cxl_rr_free_decoder(cxl_rr);
	xa_erase(&port->regions, (unsigned long)cxlr);
	xa_destroy(&cxl_rr->endpoints);
	kfree(cxl_rr);
}

static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
			 struct cxl_endpoint_decoder *cxled)
{
	int rc;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

	if (ep) {
		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
			       GFP_KERNEL);
		if (rc)
			return rc;
	}
	cxl_rr->nr_eps++;

	if (!cxld->region) {
		cxld->region = cxlr;
		get_device(&cxlr->dev);
	}

	return 0;
}
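
/*
 * Lifecycle note: a cxl_region_ref is established at a port by the first
 * endpoint decoder to attach (alloc_region_ref()), pinned by each
 * additional endpoint (cxl_rr_ep_add()), and released when the last
 * endpoint detaches and nr_eps falls to zero (free_region_ref()).
 */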

static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
				struct cxl_endpoint_decoder *cxled,
				struct cxl_region_ref *cxl_rr)
{
	struct cxl_decoder *cxld;

	if (port == cxled_to_port(cxled))
		cxld = &cxled->cxld;
	else
		cxld = cxl_region_find_decoder(port, cxlr);
	if (!cxld) {
		dev_dbg(&cxlr->dev, "%s: no decoder available\n",
			dev_name(&port->dev));
		return -EBUSY;
	}

	if (cxld->region) {
		dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
			dev_name(&port->dev), dev_name(&cxld->dev),
			dev_name(&cxld->region->dev));
		return -EBUSY;
	}

	/*
	 * Endpoints should already match the region type, but backstop that
	 * assumption with an assertion. Switch-decoders change mapping-type
	 * based on what is mapped when they are assigned to a region.
	 */
	dev_WARN_ONCE(&cxlr->dev,
		      port == cxled_to_port(cxled) &&
			      cxld->target_type != cxlr->type,
		      "%s:%s mismatch decoder type %d -> %d\n",
		      dev_name(&cxled_to_memdev(cxled)->dev),
		      dev_name(&cxld->dev), cxld->target_type, cxlr->type);
	cxld->target_type = cxlr->type;
	cxl_rr->decoder = cxld;
	return 0;
}

/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 * - additionally allocate a decoder instance that will host @cxlr on @port
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */
static int cxl_port_attach_region(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_ref *cxl_rr;
	bool nr_targets_inc = false;
	struct cxl_decoder *cxld;
	unsigned long index;
	int rc = -EBUSY;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (cxl_rr) {
		struct cxl_ep *ep_iter;
		int found = 0;

		/*
		 * Walk the existing endpoints that have been attached to
		 * @cxlr at @port and see if they share the same 'next' port
		 * in the downstream direction. I.e. endpoints that share common
		 * upstream switch.
		 */
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter == ep)
				continue;
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}

		/*
		 * New target port, or @port is an endpoint port that always
		 * accounts its own local decode as a target.
		 */
		if (!found || !ep->next) {
			cxl_rr->nr_targets++;
			nr_targets_inc = true;
		}
	} else {
		cxl_rr = alloc_region_ref(port, cxlr);
		if (IS_ERR(cxl_rr)) {
			dev_dbg(&cxlr->dev,
				"%s: failed to allocate region reference\n",
				dev_name(&port->dev));
			return PTR_ERR(cxl_rr);
		}
		nr_targets_inc = true;

		rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
		if (rc)
			goto out_erase;
	}
	cxld = cxl_rr->decoder;

	rc = cxl_rr_ep_add(cxl_rr, cxled);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track endpoint %s:%s reference\n",
			dev_name(&port->dev), dev_name(&cxlmd->dev),
			dev_name(&cxld->dev));
		goto out_erase;
	}

	dev_dbg(&cxlr->dev,
		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
		dev_name(&cxled->cxld.dev), pos,
		ep ? ep->next ? dev_name(ep->next->uport_dev) :
				dev_name(&cxlmd->dev) :
		     "none",
		cxl_rr->nr_eps, cxl_rr->nr_targets);

	return 0;
out_erase:
	if (nr_targets_inc)
		cxl_rr->nr_targets--;
	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
	return rc;
}

static void cxl_port_detach_region(struct cxl_port *port,
				   struct cxl_region *cxlr,
				   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_ref *cxl_rr;
	struct cxl_ep *ep = NULL;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (!cxl_rr)
		return;

	/*
	 * Endpoint ports do not carry cxl_ep references, and they
	 * never target more than one endpoint by definition
	 */
	if (cxl_rr->decoder == &cxled->cxld)
		cxl_rr->nr_eps--;
	else
		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
	if (ep) {
		struct cxl_ep *ep_iter;
		unsigned long index;
		int found = 0;

		cxl_rr->nr_eps--;
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}
		if (!found)
			cxl_rr->nr_targets--;
	}

	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
}

static int check_last_peer(struct cxl_endpoint_decoder *cxled,
			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
			   int distance)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled_peer;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_memdev *cxlmd_peer;
	struct cxl_ep *ep_peer;
	int pos = cxled->pos;

	/*
	 * If this position wants to share a dport with the last endpoint mapped
	 * then that endpoint, at index 'position - distance', must also be
	 * mapped by this dport.
	 */
	if (pos < distance) {
		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxled_peer = p->targets[pos - distance];
	cxlmd_peer = cxled_to_memdev(cxled_peer);
	ep_peer = cxl_ep_load(port, cxlmd_peer);
	if (ep->dport != ep_peer->dport) {
		dev_dbg(&cxlr->dev,
			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
			dev_name(&cxlmd_peer->dev),
			dev_name(&cxled_peer->cxld.dev));
		return -ENXIO;
	}

	return 0;
}
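
/*
 * Worked example for the granularity math in cxl_port_setup_targets():
 * a port whose parent decodes x2 at a 256 byte granularity only sees
 * every other 256 byte slot, so its own decoders interleave at
 * 256 * 2 = 512 bytes, and its interleave ways equal the number of
 * distinct downstream targets this region uses at the port
 * (cxl_rr->nr_targets).
 */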

static int cxl_port_setup_targets(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_switch_decoder *cxlsd;
	u16 eig, peig;
	u8 eiw, peiw;

	/*
	 * While root level decoders support x3, x6, x12, switch level
	 * decoders only support powers of 2 up to x16.
	 */
	if (!is_power_of_2(cxl_rr->nr_targets)) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			cxl_rr->nr_targets);
		return -EINVAL;
	}

	cxlsd = to_cxl_switch_decoder(&cxld->dev);
	if (cxl_rr->nr_targets_set) {
		int i, distance;

		/*
		 * Passthrough decoders impose no distance requirements between
		 * peers
		 */
		if (cxl_rr->nr_targets == 1)
			distance = 0;
		else
			distance = p->nr_targets / cxl_rr->nr_targets;
		for (i = 0; i < cxl_rr->nr_targets_set; i++)
			if (ep->dport == cxlsd->target[i]) {
				rc = check_last_peer(cxled, ep, cxl_rr,
						     distance);
				if (rc)
					return rc;
				goto out_target_set;
			}
		goto add_target;
	}

	if (is_cxl_root(parent_port)) {
		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
		/*
		 * For purposes of address bit routing, use power-of-2 math for
		 * switch ports.
		 */
		if (!is_power_of_2(parent_iw))
			parent_iw /= 3;
	} else {
		struct cxl_region_ref *parent_rr;
		struct cxl_decoder *parent_cxld;

		parent_rr = cxl_rr_load(parent_port, cxlr);
		parent_cxld = parent_rr->decoder;
		parent_ig = parent_cxld->interleave_granularity;
		parent_iw = parent_cxld->interleave_ways;
	}

	rc = granularity_to_eig(parent_ig, &peig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_ig);
		return rc;
	}

	rc = ways_to_eiw(parent_iw, &peiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_iw);
		return rc;
	}

	iw = cxl_rr->nr_targets;
	rc = ways_to_eiw(iw, &eiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev), iw);
		return rc;
	}

	/*
	 * Interleave granularity is a multiple of @parent_port granularity.
	 * Multiplier is the parent port interleave ways.
	 */
	rc = granularity_to_eig(parent_ig * parent_iw, &eig);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: invalid granularity calculation (%d * %d)\n",
			dev_name(&parent_port->dev), parent_ig, parent_iw);
		return rc;
	}

	rc = eig_to_granularity(eig, &ig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			256 << eig);
		return rc;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxld->interleave_ways != iw ||
		    cxld->interleave_granularity != ig ||
		    cxld->hpa_range.start != p->res->start ||
		    cxld->hpa_range.end != p->res->end ||
		    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
			dev_err(&cxlr->dev,
				"%s:%s %s expected iw: %d ig: %d %pr\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, iw, ig, p->res);
			dev_err(&cxlr->dev,
				"%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, cxld->interleave_ways,
				cxld->interleave_granularity,
				(cxld->flags & CXL_DECODER_F_ENABLE) ?
					"enabled" : "disabled",
				cxld->hpa_range.start, cxld->hpa_range.end);
			return -ENXIO;
		}
	} else {
		cxld->interleave_ways = iw;
		cxld->interleave_granularity = ig;
		cxld->hpa_range = (struct range) {
			.start = p->res->start,
			.end = p->res->end,
		};
	}
	dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
		dev_name(&port->dev), iw, ig);
add_target:
	if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s: targets full trying to add %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
			dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				dev_name(&cxlsd->cxld.dev),
				dev_name(ep->dport->dport_dev),
				cxl_rr->nr_targets_set);
			return -ENXIO;
		}
	} else
		cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
	inc = 1;
out_target_set:
	cxl_rr->nr_targets_set += inc;
	dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);

	return 0;
}

static void cxl_port_reset_targets(struct cxl_port *port,
				   struct cxl_region *cxlr)
{
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_decoder *cxld;

	/*
	 * After the last endpoint has been detached the entire cxl_rr may now
	 * be gone.
	 */
	if (!cxl_rr)
		return;
	cxl_rr->nr_targets_set = 0;

	cxld = cxl_rr->decoder;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};
}

static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;
	int i;

	/*
	 * In the auto-discovery case skip automatic teardown since the
	 * address space is already active
	 */
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		return;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		if (cxlds->rcd)
			continue;

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
			cxl_port_reset_targets(iter, cxlr);
	}
}
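
/*
 * Program decoder targets for every port in each endpoint's ancestry,
 * descending from the port just below the root toward the endpoint (RCH
 * endpoints have no switch decoders to program). A region must be
 * uniformly RCH or VH; mixed topologies are rejected.
 */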

static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	int i, rc, rch = 0, vh = 0;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		/* validate that all targets agree on topology */
		if (!cxlds->rcd) {
			vh++;
		} else {
			rch++;
			continue;
		}

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		/*
		 * Descend the topology tree programming / validating
		 * targets while looking for conflicts.
		 */
		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			rc = cxl_port_setup_targets(iter, cxlr, cxled);
			if (rc) {
				cxl_region_teardown_targets(cxlr);
				return rc;
			}
		}
	}

	if (rch && vh) {
		dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
		cxl_region_teardown_targets(cxlr);
		return -ENXIO;
	}

	return 0;
}

static int cxl_region_validate_position(struct cxl_region *cxlr,
					struct cxl_endpoint_decoder *cxled,
					int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	if (pos < 0 || pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		return -ENXIO;
	}

	if (p->targets[pos] == cxled)
		return 0;

	if (p->targets[pos]) {
		struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
		struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);

		dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
			pos, dev_name(&cxlmd_target->dev),
			dev_name(&cxled_target->cxld.dev));
		return -EBUSY;
	}

	for (i = 0; i < p->interleave_ways; i++) {
		struct cxl_endpoint_decoder *cxled_target;
		struct cxl_memdev *cxlmd_target;

		cxled_target = p->targets[i];
		if (!cxled_target)
			continue;

		cxlmd_target = cxled_to_memdev(cxled_target);
		if (cxlmd_target == cxlmd) {
			dev_dbg(&cxlr->dev,
				"%s already specified at position %d via: %s\n",
				dev_name(&cxlmd->dev), pos,
				dev_name(&cxled_target->cxld.dev));
			return -EBUSY;
		}
	}

	return 0;
}

static int cxl_region_attach_position(struct cxl_region *cxlr,
				      struct cxl_root_decoder *cxlrd,
				      struct cxl_endpoint_decoder *cxled,
				      const struct cxl_dport *dport, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *iter;
	int rc;

	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(&cxlrd->cxlsd.cxld.dev));
		return -ENXIO;
	}

	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent)) {
		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
		if (rc)
			goto err;
	}

	return 0;

err:
	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);
	return rc;
}

static int cxl_region_attach_auto(struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_region_params *p = &cxlr->params;

	if (cxled->state != CXL_DECODER_STATE_AUTO) {
		dev_err(&cxlr->dev,
			"%s: unable to add decoder to autodetected region\n",
			dev_name(&cxled->cxld.dev));
		return -EINVAL;
	}

	if (pos >= 0) {
		dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
			dev_name(&cxled->cxld.dev), pos);
		return -EINVAL;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_err(&cxlr->dev, "%s: no more target slots available\n",
			dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	/*
	 * Temporarily record the endpoint decoder into the target array. Yes,
	 * this means that userspace can view devices in the wrong position
	 * before the region activates, and must be careful to understand when
	 * it might be racing region autodiscovery.
	 */
	pos = p->nr_targets;
	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	return 0;
}
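
/*
 * Helpers for sorting auto-discovered endpoints into their interleave
 * positions: walk two endpoints' port ancestries to the first shared
 * port, then compare the relative order of their dports in the switch
 * decoder that maps the region's address range.
 */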

static struct cxl_port *next_port(struct cxl_port *port)
{
	if (!port->parent_dport)
		return NULL;
	return port->parent_dport->port;
}

static int match_switch_decoder_by_range(struct device *dev, void *data)
{
	struct cxl_switch_decoder *cxlsd;
	struct range *r1, *r2 = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxlsd = to_cxl_switch_decoder(dev);
	r1 = &cxlsd->cxld.hpa_range;

	if (is_root_decoder(dev))
		return range_contains(r1, r2);
	return (r1->start == r2->start && r1->end == r2->end);
}

static void find_positions(const struct cxl_switch_decoder *cxlsd,
			   const struct cxl_port *iter_a,
			   const struct cxl_port *iter_b, int *a_pos,
			   int *b_pos)
{
	int i;

	for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
		if (cxlsd->target[i] == iter_a->parent_dport)
			*a_pos = i;
		else if (cxlsd->target[i] == iter_b->parent_dport)
			*b_pos = i;
		if (*a_pos >= 0 && *b_pos >= 0)
			break;
	}
}

static int cmp_decode_pos(const void *a, const void *b)
{
	struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
	struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
	struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
	struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
	struct cxl_port *port_a = cxled_to_port(cxled_a);
	struct cxl_port *port_b = cxled_to_port(cxled_b);
	struct cxl_port *iter_a, *iter_b, *port = NULL;
	struct cxl_switch_decoder *cxlsd;
	struct device *dev;
	int a_pos, b_pos;
	unsigned int seq;

	/* Exit early if any prior sorting failed */
	if (cxled_a->pos < 0 || cxled_b->pos < 0)
		return 0;

	/*
	 * Walk up the hierarchy to find a shared port, find the decoder that
	 * maps the range, compare the relative position of those dport
	 * mappings.
	 */
	for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
		struct cxl_port *next_a, *next_b;

		next_a = next_port(iter_a);
		if (!next_a)
			break;

		for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
			next_b = next_port(iter_b);
			if (next_a != next_b)
				continue;
			port = next_a;
			break;
		}

		if (port)
			break;
	}

	if (!port) {
		dev_err(cxlmd_a->dev.parent,
			"failed to find shared port with %s\n",
			dev_name(cxlmd_b->dev.parent));
		goto err;
	}

	dev = device_find_child(&port->dev, &cxled_a->cxld.hpa_range,
				match_switch_decoder_by_range);
	if (!dev) {
		struct range *range = &cxled_a->cxld.hpa_range;

		dev_err(port->uport_dev,
			"failed to find decoder that maps %#llx-%#llx\n",
			range->start, range->end);
		goto err;
	}

	cxlsd = to_cxl_switch_decoder(dev);
	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	put_device(dev);

	if (a_pos < 0 || b_pos < 0) {
		dev_err(port->uport_dev,
			"failed to find shared decoder for %s and %s\n",
			dev_name(cxlmd_a->dev.parent),
			dev_name(cxlmd_b->dev.parent));
		goto err;
	}

	dev_dbg(port->uport_dev, "%s comes %s %s\n",
		dev_name(cxlmd_a->dev.parent),
		a_pos - b_pos < 0 ? "before" : "after",
		dev_name(cxlmd_b->dev.parent));

	return a_pos - b_pos;
err:
	cxled_a->pos = -1;
	return 0;
}

static int cxl_region_sort_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
	     NULL);

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		/*
		 * Record that sorting failed, but still continue to restore
		 * cxled->pos with its ->targets[] position so that follow-on
		 * code paths can reliably do p->targets[cxled->pos] to
		 * self-reference their entry.
		 */
		if (cxled->pos < 0)
			rc = -ENXIO;
		cxled->pos = i;
	}

	dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
	return rc;
}
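
/*
 * cxl_region_attach() validates that an endpoint decoder's mode, target
 * type, host bridge, and DPA allocation are compatible with the region
 * before recording it at @pos, and activates the region once all
 * interleave_ways positions are filled.
 */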

static int cxl_region_attach(struct cxl_region *cxlr,
			     struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_port *ep_port, *root_port;
	struct cxl_dport *dport;
	int rc = -ENXIO;

	if (cxled->mode != cxlr->mode) {
		dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
			dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
		return -EINVAL;
	}

	if (cxled->mode == CXL_DECODER_DEAD) {
		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
		return -ENODEV;
	}

	/* all full of members, or interleave config not established? */
	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "region already active\n");
		return -EBUSY;
	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "interleave config missing\n");
		return -ENXIO;
	}

	ep_port = cxled_to_port(cxled);
	root_port = cxlrd_to_port(cxlrd);
	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
	if (!dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(cxlr->dev.parent));
		return -ENXIO;
	}

	if (cxled->cxld.target_type != cxlr->type) {
		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			cxled->cxld.target_type, cxlr->type);
		return -ENXIO;
	}

	if (!cxled->dpa_res) {
		dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
	    resource_size(p->res)) {
		dev_dbg(&cxlr->dev,
			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
			(u64)resource_size(p->res));
		return -EINVAL;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		int i;

		rc = cxl_region_attach_auto(cxlr, cxled, pos);
		if (rc)
			return rc;

		/* await more targets to arrive... */
		if (p->nr_targets < p->interleave_ways)
			return 0;

		/*
		 * All targets are here, which implies all PCI enumeration that
		 * affects this region has been completed. Walk the topology to
		 * sort the devices into their relative region decode position.
		 */
		rc = cxl_region_sort_targets(cxlr);
		if (rc)
			return rc;

		for (i = 0; i < p->nr_targets; i++) {
			cxled = p->targets[i];
			ep_port = cxled_to_port(cxled);
			dport = cxl_find_dport_by_dev(root_port,
						      ep_port->host_bridge);
			rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
							dport, i);
			if (rc)
				return rc;
		}

		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			return rc;

		/*
		 * If target setup succeeds in the autodiscovery case
		 * then the region is already committed.
		 */
		p->state = CXL_CONFIG_COMMIT;

		return 0;
	}

	rc = cxl_region_validate_position(cxlr, cxled, pos);
	if (rc)
		return rc;

	rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
	if (rc)
		return rc;

	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	if (p->nr_targets == p->interleave_ways) {
		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			goto err_decrement;
		p->state = CXL_CONFIG_ACTIVE;
	}

	cxled->cxld.interleave_ways = p->interleave_ways;
	cxled->cxld.interleave_granularity = p->interleave_granularity;
	cxled->cxld.hpa_range = (struct range) {
		.start = p->res->start,
		.end = p->res->end,
	};

	return 0;

err_decrement:
	p->nr_targets--;
	cxled->pos = -1;
	p->targets[pos] = NULL;
	return rc;
}

static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
	struct cxl_region *cxlr = cxled->cxld.region;
	struct cxl_region_params *p;
	int rc = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!cxlr)
		return 0;

	p = &cxlr->params;
	get_device(&cxlr->dev);

	if (p->state > CXL_CONFIG_ACTIVE) {
		/*
		 * TODO: tear down all impacted regions if a device is
		 * removed out of order
		 */
		rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
		if (rc)
			goto out;
		p->state = CXL_CONFIG_ACTIVE;
	}

	for (iter = ep_port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);

	if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
	    p->targets[cxled->pos] != cxled) {
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);

		dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
			      dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			      cxled->pos);
		goto out;
	}

	if (p->state == CXL_CONFIG_ACTIVE) {
		p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
		cxl_region_teardown_targets(cxlr);
	}
	p->targets[cxled->pos] = NULL;
	p->nr_targets--;
	cxled->cxld.hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	/* notify the region driver that one of its targets has departed */
	up_write(&cxl_region_rwsem);
	device_release_driver(&cxlr->dev);
	down_write(&cxl_region_rwsem);
out:
	put_device(&cxlr->dev);
	return rc;
}

void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
	down_write(&cxl_region_rwsem);
	cxled->mode = CXL_DECODER_DEAD;
	cxl_region_detach(cxled);
	up_write(&cxl_region_rwsem);
}

static int attach_target(struct cxl_region *cxlr,
			 struct cxl_endpoint_decoder *cxled, int pos,
			 unsigned int state)
{
	int rc = 0;

	if (state == TASK_INTERRUPTIBLE)
		rc = down_write_killable(&cxl_region_rwsem);
	else
		down_write(&cxl_region_rwsem);
	if (rc)
		return rc;

	down_read(&cxl_dpa_rwsem);
	rc = cxl_region_attach(cxlr, cxled, pos);
	up_read(&cxl_dpa_rwsem);
	up_write(&cxl_region_rwsem);
	return rc;
}

static int detach_target(struct cxl_region *cxlr, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	if (!p->targets[pos]) {
		rc = 0;
		goto out;
	}

	rc = cxl_region_detach(p->targets[pos]);
out:
	up_write(&cxl_region_rwsem);
	return rc;
}
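
/*
 * store_targetN() semantics: writing an endpoint decoder name attaches
 * that decoder at position N, writing an empty string detaches whatever
 * currently occupies position N.
 */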

static ssize_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
			     size_t len)
{
	int rc;

	if (sysfs_streq(buf, "\n"))
		rc = detach_target(cxlr, pos);
	else {
		struct device *dev;

		dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
		if (!dev)
			return -ENODEV;

		if (!is_endpoint_decoder(dev)) {
			rc = -EINVAL;
			goto out;
		}

		rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
				   TASK_INTERRUPTIBLE);
out:
		put_device(dev);
	}

	if (rc < 0)
		return rc;
	return len;
}

#define TARGET_ATTR_RW(n)                                              \
static ssize_t target##n##_show(                                       \
	struct device *dev, struct device_attribute *attr, char *buf)  \
{                                                                      \
	return show_targetN(to_cxl_region(dev), buf, (n));             \
}                                                                      \
static ssize_t target##n##_store(struct device *dev,                   \
				 struct device_attribute *attr,        \
				 const char *buf, size_t len)          \
{                                                                      \
	return store_targetN(to_cxl_region(dev), buf, (n), len);       \
}                                                                      \
static DEVICE_ATTR_RW(target##n)

TARGET_ATTR_RW(0);
TARGET_ATTR_RW(1);
TARGET_ATTR_RW(2);
TARGET_ATTR_RW(3);
TARGET_ATTR_RW(4);
TARGET_ATTR_RW(5);
TARGET_ATTR_RW(6);
TARGET_ATTR_RW(7);
TARGET_ATTR_RW(8);
TARGET_ATTR_RW(9);
TARGET_ATTR_RW(10);
TARGET_ATTR_RW(11);
TARGET_ATTR_RW(12);
TARGET_ATTR_RW(13);
TARGET_ATTR_RW(14);
TARGET_ATTR_RW(15);

static struct attribute *target_attrs[] = {
	&dev_attr_target0.attr,
	&dev_attr_target1.attr,
	&dev_attr_target2.attr,
	&dev_attr_target3.attr,
	&dev_attr_target4.attr,
	&dev_attr_target5.attr,
	&dev_attr_target6.attr,
	&dev_attr_target7.attr,
	&dev_attr_target8.attr,
	&dev_attr_target9.attr,
	&dev_attr_target10.attr,
	&dev_attr_target11.attr,
	&dev_attr_target12.attr,
	&dev_attr_target13.attr,
	&dev_attr_target14.attr,
	&dev_attr_target15.attr,
	NULL,
};

static umode_t cxl_region_target_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;

	if (n < p->interleave_ways)
		return a->mode;
	return 0;
}

static const struct attribute_group cxl_region_target_group = {
	.attrs = target_attrs,
	.is_visible = cxl_region_target_visible,
};

static const struct attribute_group *get_cxl_region_target_group(void)
{
	return &cxl_region_target_group;
}

static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	&cxl_region_target_group,
	NULL,
};

static void cxl_region_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_region *cxlr = to_cxl_region(dev);
	int id = atomic_read(&cxlrd->region_id);

	/*
	 * Try to reuse the recently idled id rather than the cached
	 * next id to prevent the region id space from increasing
	 * unnecessarily.
	 */
	if (cxlr->id < id)
		if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
			memregion_free(id);
			goto out;
		}

	memregion_free(cxlr->id);
out:
	put_device(dev->parent);
	kfree(cxlr);
}
2103 */ 2104 static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, 2105 int id, 2106 enum cxl_decoder_mode mode, 2107 enum cxl_decoder_type type) 2108 { 2109 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); 2110 struct cxl_region *cxlr; 2111 struct device *dev; 2112 int rc; 2113 2114 switch (mode) { 2115 case CXL_DECODER_RAM: 2116 case CXL_DECODER_PMEM: 2117 break; 2118 default: 2119 dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); 2120 return ERR_PTR(-EINVAL); 2121 } 2122 2123 cxlr = cxl_region_alloc(cxlrd, id); 2124 if (IS_ERR(cxlr)) 2125 return cxlr; 2126 cxlr->mode = mode; 2127 cxlr->type = type; 2128 2129 dev = &cxlr->dev; 2130 rc = dev_set_name(dev, "region%d", id); 2131 if (rc) 2132 goto err; 2133 2134 rc = device_add(dev); 2135 if (rc) 2136 goto err; 2137 2138 rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr); 2139 if (rc) 2140 return ERR_PTR(rc); 2141 2142 dev_dbg(port->uport_dev, "%s: created %s\n", 2143 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev)); 2144 return cxlr; 2145 2146 err: 2147 put_device(dev); 2148 return ERR_PTR(rc); 2149 } 2150 2151 static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf) 2152 { 2153 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id)); 2154 } 2155 2156 static ssize_t create_pmem_region_show(struct device *dev, 2157 struct device_attribute *attr, char *buf) 2158 { 2159 return __create_region_show(to_cxl_root_decoder(dev), buf); 2160 } 2161 2162 static ssize_t create_ram_region_show(struct device *dev, 2163 struct device_attribute *attr, char *buf) 2164 { 2165 return __create_region_show(to_cxl_root_decoder(dev), buf); 2166 } 2167 2168 static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd, 2169 enum cxl_decoder_mode mode, int id) 2170 { 2171 int rc; 2172 2173 rc = memregion_alloc(GFP_KERNEL); 2174 if (rc < 0) 2175 return ERR_PTR(rc); 2176 2177 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) { 2178 memregion_free(rc); 2179 return ERR_PTR(-EBUSY); 2180 } 2181 2182 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); 2183 } 2184 2185 static ssize_t create_pmem_region_store(struct device *dev, 2186 struct device_attribute *attr, 2187 const char *buf, size_t len) 2188 { 2189 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2190 struct cxl_region *cxlr; 2191 int rc, id; 2192 2193 rc = sscanf(buf, "region%d\n", &id); 2194 if (rc != 1) 2195 return -EINVAL; 2196 2197 cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id); 2198 if (IS_ERR(cxlr)) 2199 return PTR_ERR(cxlr); 2200 2201 return len; 2202 } 2203 DEVICE_ATTR_RW(create_pmem_region); 2204 2205 static ssize_t create_ram_region_store(struct device *dev, 2206 struct device_attribute *attr, 2207 const char *buf, size_t len) 2208 { 2209 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2210 struct cxl_region *cxlr; 2211 int rc, id; 2212 2213 rc = sscanf(buf, "region%d\n", &id); 2214 if (rc != 1) 2215 return -EINVAL; 2216 2217 cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id); 2218 if (IS_ERR(cxlr)) 2219 return PTR_ERR(cxlr); 2220 2221 return len; 2222 } 2223 DEVICE_ATTR_RW(create_ram_region); 2224 2225 static ssize_t region_show(struct device *dev, struct device_attribute *attr, 2226 char *buf) 2227 { 2228 struct cxl_decoder *cxld = to_cxl_decoder(dev); 2229 ssize_t rc; 2230 2231 rc = down_read_interruptible(&cxl_region_rwsem); 2232 if (rc) 2233 return rc; 2234 2235 if (cxld->region) 2236 rc = sysfs_emit(buf, "%s\n", 
				dev_name(&cxld->region->dev));
	else
		rc = sysfs_emit(buf, "\n");
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(region);

static struct cxl_region *
cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *region_dev;

	region_dev = device_find_child_by_name(&cxld->dev, name);
	if (!region_dev)
		return ERR_PTR(-ENODEV);

	return to_cxl_region(region_dev);
}

static ssize_t delete_region_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_region *cxlr;

	cxlr = cxl_find_region_by_name(cxlrd, buf);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	devm_release_action(port->uport_dev, unregister_region, cxlr);
	put_device(&cxlr->dev);

	return len;
}
static DEVICE_ATTR_WO(delete_region);

static void cxl_pmem_region_release(struct device *dev)
{
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	int i;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;

		put_device(&cxlmd->dev);
	}

	kfree(cxlr_pmem);
}

static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};

bool is_cxl_pmem_region(struct device *dev)
{
	return dev->type == &cxl_pmem_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);

struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
			  "not a cxl_pmem_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_pmem_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);

struct cxl_poison_context {
	struct cxl_port *port;
	enum cxl_decoder_mode mode;
	u64 offset;
};

static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
				   struct cxl_poison_context *ctx)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u64 offset, length;
	int rc = 0;

	/*
	 * Collect poison for the remaining unmapped resources
	 * after poison is collected by committed endpoints.
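	 *
	 * For example (hypothetical layout): if the last committed decoder
	 * was ram mode and covered only the first half of ram_res, the
	 * second half of ram_res is scanned below, followed by all of
	 * pmem_res when pmem capacity is present.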
2333 * 2334 * Knowing that PMEM must always follow RAM, get poison 2335 * for unmapped resources based on the last decoder's mode: 2336 * ram: scan remains of ram range, then any pmem range 2337 * pmem: scan remains of pmem range 2338 */ 2339 2340 if (ctx->mode == CXL_DECODER_RAM) { 2341 offset = ctx->offset; 2342 length = resource_size(&cxlds->ram_res) - offset; 2343 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); 2344 if (rc == -EFAULT) 2345 rc = 0; 2346 if (rc) 2347 return rc; 2348 } 2349 if (ctx->mode == CXL_DECODER_PMEM) { 2350 offset = ctx->offset; 2351 length = resource_size(&cxlds->dpa_res) - offset; 2352 if (!length) 2353 return 0; 2354 } else if (resource_size(&cxlds->pmem_res)) { 2355 offset = cxlds->pmem_res.start; 2356 length = resource_size(&cxlds->pmem_res); 2357 } else { 2358 return 0; 2359 } 2360 2361 return cxl_mem_get_poison(cxlmd, offset, length, NULL); 2362 } 2363 2364 static int poison_by_decoder(struct device *dev, void *arg) 2365 { 2366 struct cxl_poison_context *ctx = arg; 2367 struct cxl_endpoint_decoder *cxled; 2368 struct cxl_memdev *cxlmd; 2369 u64 offset, length; 2370 int rc = 0; 2371 2372 if (!is_endpoint_decoder(dev)) 2373 return rc; 2374 2375 cxled = to_cxl_endpoint_decoder(dev); 2376 if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) 2377 return rc; 2378 2379 /* 2380 * Regions are only created with single mode decoders: pmem or ram. 2381 * Linux does not support mixed mode decoders. This means that 2382 * reading poison per endpoint decoder adheres to the requirement 2383 * that poison reads of pmem and ram must be separated. 2384 * CXL 3.0 Spec 8.2.9.8.4.1 2385 */ 2386 if (cxled->mode == CXL_DECODER_MIXED) { 2387 dev_dbg(dev, "poison list read unsupported in mixed mode\n"); 2388 return rc; 2389 } 2390 2391 cxlmd = cxled_to_memdev(cxled); 2392 if (cxled->skip) { 2393 offset = cxled->dpa_res->start - cxled->skip; 2394 length = cxled->skip; 2395 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); 2396 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) 2397 rc = 0; 2398 if (rc) 2399 return rc; 2400 } 2401 2402 offset = cxled->dpa_res->start; 2403 length = cxled->dpa_res->end - offset + 1; 2404 rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region); 2405 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) 2406 rc = 0; 2407 if (rc) 2408 return rc; 2409 2410 /* Iterate until commit_end is reached */ 2411 if (cxled->cxld.id == ctx->port->commit_end) { 2412 ctx->offset = cxled->dpa_res->end + 1; 2413 ctx->mode = cxled->mode; 2414 return 1; 2415 } 2416 2417 return 0; 2418 } 2419 2420 int cxl_get_poison_by_endpoint(struct cxl_port *port) 2421 { 2422 struct cxl_poison_context ctx; 2423 int rc = 0; 2424 2425 rc = down_read_interruptible(&cxl_region_rwsem); 2426 if (rc) 2427 return rc; 2428 2429 ctx = (struct cxl_poison_context) { 2430 .port = port 2431 }; 2432 2433 rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder); 2434 if (rc == 1) 2435 rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev), 2436 &ctx); 2437 2438 up_read(&cxl_region_rwsem); 2439 return rc; 2440 } 2441 2442 static struct lock_class_key cxl_pmem_region_key; 2443 2444 static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) 2445 { 2446 struct cxl_region_params *p = &cxlr->params; 2447 struct cxl_nvdimm_bridge *cxl_nvb; 2448 struct cxl_pmem_region *cxlr_pmem; 2449 struct device *dev; 2450 int i; 2451 2452 down_read(&cxl_region_rwsem); 2453 if (p->state != CXL_CONFIG_COMMIT) { 2454 cxlr_pmem = ERR_PTR(-ENXIO); 2455 goto out; 2456 } 2457 
	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
			    GFP_KERNEL);
	if (!cxlr_pmem) {
		cxlr_pmem = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_pmem->hpa_range.start = p->res->start;
	cxlr_pmem->hpa_range.end = p->res->end;

	/* Snapshot the region configuration underneath the cxl_region_rwsem */
	cxlr_pmem->nr_mappings = p->nr_targets;
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];

		/*
		 * Regions never span CXL root devices, so by definition the
		 * bridge for one device is the same for all.
		 */
		if (i == 0) {
			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
			if (!cxl_nvb) {
				cxlr_pmem = ERR_PTR(-ENODEV);
				goto out;
			}
			cxlr->cxl_nvb = cxl_nvb;
		}
		m->cxlmd = cxlmd;
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}

	dev = &cxlr_pmem->dev;
	cxlr_pmem->cxlr = cxlr;
	cxlr->cxlr_pmem = cxlr_pmem;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_pmem;
}

static void cxl_dax_region_release(struct device *dev)
{
	struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);

	kfree(cxlr_dax);
}

static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_dax_region_type = {
	.name = "cxl_dax_region",
	.release = cxl_dax_region_release,
	.groups = cxl_dax_region_attribute_groups,
};

static bool is_cxl_dax_region(struct device *dev)
{
	return dev->type == &cxl_dax_region_type;
}

struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
			  "not a cxl_dax_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_dax_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);

static struct lock_class_key cxl_dax_region_key;

static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_dax = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
	if (!cxlr_dax) {
		cxlr_dax = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_dax->hpa_range.start = p->res->start;
	cxlr_dax->hpa_range.end = p->res->end;

	dev = &cxlr_dax->dev;
	cxlr_dax->cxlr = cxlr;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_dax_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_dax;
}

static void cxlr_pmem_unregister(void *_cxlr_pmem)
{
	struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	/*
	 * Either the bridge is in ->remove() context under the device_lock(),
	 * or cxlr_release_nvdimm() is cancelling the bridge's release action
	 * for @cxlr_pmem and doing it itself (while manually holding the
	 * bridge lock).
	 */
	device_lock_assert(&cxl_nvb->dev);
	cxlr->cxlr_pmem = NULL;
	cxlr_pmem->cxlr = NULL;
	device_unregister(&cxlr_pmem->dev);
}

static void cxlr_release_nvdimm(void *_cxlr)
{
	struct cxl_region *cxlr = _cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (cxlr->cxlr_pmem)
		devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
				    cxlr->cxlr_pmem);
	device_unlock(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	put_device(&cxl_nvb->dev);
}

/**
 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
 * @cxlr: parent CXL region for this pmem region bridge device
 *
 * Return: 0 on success, negative error code on failure.
 */
static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
	struct cxl_pmem_region *cxlr_pmem;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
	if (IS_ERR(cxlr_pmem))
		return PTR_ERR(cxlr_pmem);
	cxl_nvb = cxlr->cxl_nvb;

	dev = &cxlr_pmem->dev;
	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	device_lock(&cxl_nvb->dev);
	if (cxl_nvb->dev.driver)
		rc = devm_add_action_or_reset(&cxl_nvb->dev,
					      cxlr_pmem_unregister, cxlr_pmem);
	else
		rc = -ENXIO;
	device_unlock(&cxl_nvb->dev);

	if (rc)
		goto err_bridge;

	/* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
	return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);

err:
	put_device(dev);
err_bridge:
	put_device(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	return rc;
}
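/*
 * Teardown sketch for the bridge arrangement above: if the region goes away
 * first, cxlr_release_nvdimm() runs, takes the bridge's device_lock(), and
 * cancels the bridge-owned cxlr_pmem_unregister() action before dropping the
 * @cxl_nvb reference obtained in cxl_pmem_region_alloc(). If the nvdimm
 * bridge driver unbinds first, devres runs cxlr_pmem_unregister() in the
 * bridge's ->remove() context instead.
 */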
static void cxlr_dax_unregister(void *_cxlr_dax)
{
	struct cxl_dax_region *cxlr_dax = _cxlr_dax;

	device_unregister(&cxlr_dax->dev);
}

static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
{
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;
	int rc;

	cxlr_dax = cxl_dax_region_alloc(cxlr);
	if (IS_ERR(cxlr_dax))
		return PTR_ERR(cxlr_dax);

	dev = &cxlr_dax->dev;
	rc = dev_set_name(dev, "dax_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
					cxlr_dax);
err:
	put_device(dev);
	return rc;
}

static int match_root_decoder_by_range(struct device *dev, void *data)
{
	struct range *r1, *r2 = data;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	r1 = &cxlrd->cxlsd.cxld.hpa_range;
	return range_contains(r1, r2);
}

static int match_region_by_range(struct device *dev, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct range *r = data;
	int rc = 0;

	if (!is_cxl_region(dev))
		return 0;

	cxlr = to_cxl_region(dev);
	p = &cxlr->params;

	down_read(&cxl_region_rwsem);
	if (p->res && p->res->start == r->start && p->res->end == r->end)
		rc = 1;
	up_read(&cxl_region_rwsem);

	return rc;
}
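/*
 * For example (hypothetical addresses): a root decoder spanning HPA
 * 0x1000000000-0x17ffffffff contains an endpoint decoder programmed for
 * 0x1000000000-0x10ffffffff, so match_root_decoder_by_range() selects that
 * root decoder, while match_region_by_range() only matches a region whose
 * resource covers exactly the endpoint's range.
 */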
/* Establish an empty region covering the given HPA range */
static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
					   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxlrd_to_port(cxlrd);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct resource *res;
	int rc;

	do {
		cxlr = __create_region(cxlrd, cxled->mode,
				       atomic_read(&cxlrd->region_id));
	} while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);

	if (IS_ERR(cxlr)) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s failed to assign region: %ld\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__, PTR_ERR(cxlr));
		return cxlr;
	}

	down_write(&cxl_region_rwsem);
	p = &cxlr->params;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s autodiscovery interrupted\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__);
		rc = -EBUSY;
		goto err;
	}

	set_bit(CXL_REGION_F_AUTO, &cxlr->flags);

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto err;
	}

	*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
				    dev_name(&cxlr->dev));
	rc = insert_resource(cxlrd->res, res);
	if (rc) {
		/*
		 * Platform-firmware may not have split resources like "System
		 * RAM" on CXL window boundaries, see
		 * cxl_region_iomem_release().
		 */
		dev_warn(cxlmd->dev.parent,
			 "%s:%s: %s %s cannot insert resource\n",
			 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			 __func__, dev_name(&cxlr->dev));
	}

	p->res = res;
	p->interleave_ways = cxled->cxld.interleave_ways;
	p->interleave_granularity = cxled->cxld.interleave_granularity;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		goto err;

	dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
		dev_name(&cxlr->dev), p->res, p->interleave_ways,
		p->interleave_granularity);

	/* ...to match put_device() in cxl_add_to_region() */
	get_device(&cxlr->dev);
	up_write(&cxl_region_rwsem);

	return cxlr;

err:
	up_write(&cxl_region_rwsem);
	devm_release_action(port->uport_dev, unregister_region, cxlr);
	return ERR_PTR(rc);
}
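/*
 * construct_region() serves the autodiscovery path: the endpoint decoder
 * arrives already programmed (e.g. by platform firmware), so the region
 * inherits its interleave settings directly rather than taking them through
 * the sysfs stores, and CXL_REGION_F_AUTO records that the kernel assembled
 * the region itself.
 */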
int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_decoder *cxld = &cxled->cxld;
	struct device *cxlrd_dev, *region_dev;
	struct cxl_root_decoder *cxlrd;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	bool attach = false;
	int rc;

	cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
				      match_root_decoder_by_range);
	if (!cxlrd_dev) {
		dev_err(cxlmd->dev.parent,
			"%s:%s no CXL window for range %#llx:%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxld->dev),
			cxld->hpa_range.start, cxld->hpa_range.end);
		return -ENXIO;
	}

	cxlrd = to_cxl_root_decoder(cxlrd_dev);

	/*
	 * Ensure that if multiple threads race to construct_region() for @hpa
	 * one does the construction and the others add to that.
	 */
	mutex_lock(&cxlrd->range_lock);
	region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
				       match_region_by_range);
	if (!region_dev) {
		cxlr = construct_region(cxlrd, cxled);
		region_dev = &cxlr->dev;
	} else
		cxlr = to_cxl_region(region_dev);
	mutex_unlock(&cxlrd->range_lock);

	rc = PTR_ERR_OR_ZERO(cxlr);
	if (rc)
		goto out;

	attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);

	down_read(&cxl_region_rwsem);
	p = &cxlr->params;
	attach = p->state == CXL_CONFIG_COMMIT;
	up_read(&cxl_region_rwsem);

	if (attach) {
		/*
		 * If device_attach() fails, the range may still be active via
		 * the platform-firmware memory map; otherwise the driver for
		 * regions is local to this file, so driver matching can't
		 * fail.
		 */
		if (device_attach(&cxlr->dev) < 0)
			dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
				p->res);
	}

	put_device(region_dev);
out:
	put_device(cxlrd_dev);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
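/*
 * Note for cxl_region_probe() below: a ram region whose HPA range was
 * already claimed as "System RAM" by the platform-firmware memory map is
 * left as-is; walk_iomem_res_desc() reports the overlap via is_system_ram()
 * and probe returns success without creating a dax_region.
 */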
static int is_system_ram(struct resource *res, void *arg)
{
	struct cxl_region *cxlr = arg;
	struct cxl_region_params *p = &cxlr->params;

	dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
	return 1;
}

static int cxl_region_probe(struct device *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc) {
		dev_dbg(&cxlr->dev, "probe interrupted\n");
		return rc;
	}

	if (p->state < CXL_CONFIG_COMMIT) {
		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
		rc = -ENXIO;
		goto out;
	}

	if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
		dev_err(&cxlr->dev,
			"failed to activate, re-commit region and retry\n");
		rc = -ENXIO;
		goto out;
	}

	/*
	 * From this point on any path that changes the region's state away
	 * from CXL_CONFIG_COMMIT is also responsible for releasing the
	 * driver.
	 */
out:
	up_read(&cxl_region_rwsem);

	if (rc)
		return rc;

	switch (cxlr->mode) {
	case CXL_DECODER_PMEM:
		return devm_cxl_add_pmem_region(cxlr);
	case CXL_DECODER_RAM:
		/*
		 * The region cannot be managed by CXL if any portion of
		 * it is already online as 'System RAM'.
		 */
		if (walk_iomem_res_desc(IORES_DESC_NONE,
					IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					p->res->start, p->res->end, cxlr,
					is_system_ram) > 0)
			return 0;
		return devm_cxl_add_dax_region(cxlr);
	default:
		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
			cxlr->mode);
		return -ENXIO;
	}
}

static struct cxl_driver cxl_region_driver = {
	.name = "cxl_region",
	.probe = cxl_region_probe,
	.id = CXL_DEVICE_REGION,
};

int cxl_region_init(void)
{
	return cxl_driver_register(&cxl_region_driver);
}

void cxl_region_exit(void)
{
	cxl_driver_unregister(&cxl_region_driver);
}

MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(DEVMEM);
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);