// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}

static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
				struct cxl_component_regs *regs)
{
	struct cxl_register_map map = {
		.resource = port->component_reg_phys,
		.base = crb,
		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
	};

	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
	if (!map.component_map.hdm_decoder.valid) {
		dev_err(&port->dev, "HDM decoder registers invalid\n");
		return -ENXIO;
	}

	return cxl_map_component_regs(&port->dev, regs, &map,
				      BIT(CXL_CM_CAP_CAP_ID_HDM));
}

static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;
	int i;

	if (!info)
		return false;

	cxlhdm = dev_get_drvdata(&info->port->dev);
	hdm = cxlhdm->regs.hdm_decoder;

	if (!hdm)
		return true;

	/*
	 * If HDM decoders are present and the driver is in control of
	 * Mem_Enable, skip DVSEC based emulation.
	 */
	if (!info->mem_enabled)
		return false;

	/*
	 * If any decoders are committed already, there should not be any
	 * emulated DVSEC decoders.
	 */
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return false;
	}

	return true;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	void __iomem *crb;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);
	cxlhdm->port = port;
	dev_set_drvdata(dev, cxlhdm);

	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb && info && info->mem_enabled) {
		cxlhdm->decoder_count = info->ranges;
		return cxlhdm;
	} else if (!crb) {
		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
	iounmap(crb);
	if (rc)
		return ERR_PTR(rc);

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Now that the hdm capability is parsed, decide if range
	 * register emulation is needed and fixup cxlhdm accordingly.
	 */
	if (should_emulate_decoders(info)) {
		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
			info->ranges > 1 ? "s" : "");
		cxlhdm->decoder_count = info->ranges;
	}

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len)
		goto success;

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

success:
	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			 resource_size_t base, resource_size_t len,
			 resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;
	up_read(&cxl_dpa_rwsem);

	return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}
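
/*
 * Note (summary of the checks above, not new policy): DPA allocations are
 * strictly ordered by decoder instance. __cxl_dpa_reserve() refuses decoder N
 * until port->hdm_end shows decoder N-1 has been handled, and cxl_dpa_free()
 * only releases the most recently allocated decoder. Callers, like the
 * decoder sysfs attributes referenced in the comments above, are expected to
 * follow that last-allocated, first-freed order.
 */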

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
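
/*
 * Worked example for the skip math above (hypothetical layout): with ram_res
 * spanning DPA 0x0-0x0fffffff, pmem_res spanning 0x10000000-0x1fffffff, and
 * no allocations yet, a pmem allocation starts at
 * free_pmem_start == 0x10000000 while free_ram_start == 0x0. The skip then
 * covers 0x0-0x0fffffff (skip == 0x10000000) so that the unallocated ram
 * range is reserved alongside the pmem allocation by __cxl_dpa_reserve().
 */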

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
			  CXL_HDM_DECODER0_CTRL_TYPE);
}

static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
			  ways > 8 || ways > cxlsd->nr_targets,
			  "ways: %d overflows targets: %d\n", ways,
			  cxlsd->nr_targets))
		return -ENXIO;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);

	return 0;
}
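
/*
 * Illustrative example (hypothetical port ids): for a 4-way switch decoder
 * whose targets have port_ids 3, 1, 0 and 2 in interleave order,
 * cxlsd_set_targets() packs one byte per way into the 64-bit target list
 * value:
 *
 *	*tgt == 0x02000103	(bits 7:0 == 3, 15:8 == 1, 23:16 == 0, 31:24 == 2)
 *
 * cxl_decoder_commit() below then writes the low and high halves of this
 * value to the decoder's target list registers.
 */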

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		rc = cxlsd_set_targets(cxlsd, &targets);
		if (rc) {
			dev_dbg(&port->dev, "%s: target configuration error\n",
				dev_name(&cxld->dev));
			goto err;
		}

		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
err:
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}

	return 0;
}
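
/*
 * Summary of the programming sequence implemented above (no new behavior):
 * cxl_decoder_commit() writes the HPA base and size, then either the target
 * list (switch decoders) or the DPA skip (endpoint decoders), and finally
 * the control register with COMMIT set, polling for COMMITTED or an error
 * via cxld_await_commit(). cxl_decoder_reset() undoes this by clearing
 * COMMIT and zeroing the base/size registers. These functions are wired up
 * as the cxld->commit and cxld->reset callbacks in init_hdm_decoder() below.
 */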

static int cxl_setup_hdm_decoder_from_dvsec(
	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
	int which, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 len;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	len = range_len(&info->dvsec_range[which]);
	if (!len)
		return -ENOENT;

	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
		return rc;
	}
	*dpa_base += len;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}
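
/*
 * Note on the emulated path above: an emulated decoder carries no
 * commit/reset callbacks, is reported as enabled and locked, and its DPA is
 * reserved contiguously from @dpa_base with skip == 0. Advancing *dpa_base
 * by the range length places a second range register (which == 1) directly
 * after the first.
 */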

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 size, base, skip, dpa_size;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(info))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
							which, info);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
			cxld->target_type = CXL_DECODER_EXPANDER;
		else
			cxld->target_type = CXL_DECODER_ACCELERATOR;
		if (cxld->id != port->commit_end + 1) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		/* unless / until type-2 drivers arrive, assume type-3 */
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
		cxld->target_type = CXL_DECODER_EXPANDER;
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	if (!info) {
		target_list.value =
			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region(),
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
				      &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
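
/*
 * Illustrative sketch (an assumption, not code from this file): a port
 * driver pairs the exported helpers by mapping the HDM capability and then
 * enumerating its decoders, falling back to the passthrough decoder for a
 * single-dport port that publishes no HDM capability:
 *
 *	cxlhdm = devm_cxl_setup_hdm(port, info);
 *	if (IS_ERR(cxlhdm))
 *		return devm_cxl_add_passthrough_decoder(port);
 *	return devm_cxl_enumerate_decoders(cxlhdm, info);
 *
 * The exact fallback policy belongs to the port driver, not this file.
 */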