// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

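/* Serializes DPA (device physical address) allocation and release below */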
DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the
 * first CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);

	/*
	 * Capability checks are moot for passthrough decoders, support
	 * any and all possibilities.
	 */
	cxlhdm->interleave_mask = ~0U;
	cxlhdm->iw_cap_mask = ~0UL;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

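	/* a passthrough decode targets the port's one-and-only dport */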
	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
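	/* interleave capabilities are encoded as BIT(nr_ways) */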
	cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
		cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
		cxlhdm->iw_cap_mask |= BIT(16);
}

static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
				struct cxl_component_regs *regs)
{
	struct cxl_register_map map = {
		.host = &port->dev,
		.resource = port->component_reg_phys,
		.base = crb,
		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
	};

	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
	if (!map.component_map.hdm_decoder.valid) {
		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
		/* unique error code to indicate no HDM decoder capability */
		return -ENODEV;
	}

	return cxl_map_component_regs(&map, regs, BIT(CXL_CM_CAP_CAP_ID_HDM));
}

static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;
	int i;

	if (!info)
		return false;

	cxlhdm = dev_get_drvdata(&info->port->dev);
	hdm = cxlhdm->regs.hdm_decoder;

	if (!hdm)
		return true;

	/*
	 * If HDM decoders are present and the driver is in control of
	 * Mem_Enable, skip DVSEC based emulation.
	 */
	if (!info->mem_enabled)
		return false;

	/*
	 * If any decoders are committed already, there should not be any
	 * emulated DVSEC decoders.
	 */
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		dev_dbg(&info->port->dev,
			"decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
			info->port->id, i,
			FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
			readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return false;
	}

	return true;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	void __iomem *crb;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);
	cxlhdm->port = port;
	dev_set_drvdata(dev, cxlhdm);

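	/*
	 * If the component registers are absent, fall back to operating on
	 * the cached DVSEC range registers, provided platform firmware has
	 * already enabled memory (Mem_Enable).
	 */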
	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb && info && info->mem_enabled) {
		cxlhdm->decoder_count = info->ranges;
		return cxlhdm;
	} else if (!crb) {
		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
	iounmap(crb);
	if (rc)
		return ERR_PTR(rc);

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Now that the hdm capability is parsed, decide if range
	 * register emulation is needed and fixup cxlhdm accordingly.
	 */
	if (should_emulate_decoders(info)) {
		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
			info->ranges > 1 ? "s" : "");
		cxlhdm->decoder_count = info->ranges;
	}

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len) {
		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
			 port->id, cxled->cxld.id);
		return -EINVAL;
	}

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

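	/*
	 * A decoder with a non-zero @skipped claims the DPA it decodes past
	 * (@base - @skipped up to @base) in addition to its mapped range, so
	 * that no other decoder can allocate from the skipped span.
	 */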
	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			 resource_size_t base, resource_size_t len,
			 resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
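
/*
 * Example (illustrative only, not part of the original file): reserving
 * 256MB at the base of a device's volatile (ram) partition, with no
 * skipped capacity:
 *
 *	rc = devm_cxl_dpa_reserve(cxled, cxlds->ram_res.start, SZ_256M, 0);
 *
 * The reservation is dropped automatically when the endpoint port is
 * unregistered, courtesy of the devm action registered above.
 */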

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	lockdep_assert_held(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;

	return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration.
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

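	/*
	 * DPA allocations only grow from the end of each partition, so the
	 * next free address is just past the last child resource (or the
	 * partition base when nothing is allocated yet).
	 */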
	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl,
			  !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
			  CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

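	/* pack one dport id per interleave way, 8 bits each, LSB first */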
	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account
 * for clock skew and other marginal behavior.
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	/*
	 * For endpoint decoders hosted on CXL memory devices that
	 * support the sanitize operation, make sure sanitize is not in-flight.
	 */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_memdev_state *mds =
			to_cxl_memdev_state(cxlmd->cxlds);

		if (mds && mds->security.sanitize_active) {
			dev_dbg(&cxlmd->dev,
				"attempted to commit %s during sanitize\n",
				dev_name(&cxld->dev));
			return -EBUSY;
		}
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		cxlsd_set_targets(cxlsd, &targets);
		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

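	/* the control register write carries the COMMIT bit, it must go last */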
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int commit_reap(struct device *dev, const void *data)
{
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_decoder *cxld;

	if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	if (port->commit_end == cxld->id &&
	    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
		port->commit_end--;
		dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
			dev_name(&cxld->dev), port->commit_end);
	}

	return 0;
}

void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	lockdep_assert_held_write(&cxl_region_rwsem);

	/*
	 * Once the highest committed decoder is disabled, free any other
	 * decoders that remain pinned by an out-of-order release.
	 */
	port->commit_end--;
	dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
		port->commit_end);
	device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
					   commit_reap);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, CXL);

static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return;

	if (port->commit_end == id)
		cxl_port_commit_reap(cxld);
	else
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);

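	/* clear the commit bit, then zero out the decode geometry */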
	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}
}

static int cxl_setup_hdm_decoder_from_dvsec(
	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
	int which, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 len;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	len = range_len(&info->dvsec_range[which]);
	if (!len)
		return -ENOENT;

	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
		return rc;
	}
	*dpa_base += len;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size, lo, hi;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(info))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
							which, info);

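	/* base and size registers are split into 32-bit low/high halves */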
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
	base = (hi << 32) + lo;
	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
	size = (hi << 32) + lo;
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	if (info)
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		else
			cxld->target_type = CXL_DECODER_DEVMEM;

		guard(rwsem_write)(&cxl_region_rwsem);
		if (cxld->id != cxl_num_decoders_committed(port)) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}

		if (size == 0) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed with zero size\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		if (cxled) {
			struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
			struct cxl_dev_state *cxlds = cxlmd->cxlds;

			/*
			 * Default by devtype until a device arrives that needs
			 * more precision.
			 */
			if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
				cxld->target_type = CXL_DECODER_HOSTONLYMEM;
			else
				cxld->target_type = CXL_DECODER_DEVMEM;
		} else {
			/* To be overridden by region type at commit time */
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		}

		if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
		    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
			ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
		cxld->interleave_ways, cxld->interleave_granularity);

	if (!cxled) {
		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
		target_list.value = (hi << 32) + lo;
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
	skip = (hi << 32) + lo;
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region(),
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
				      &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);