// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the capacity actively mapped by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 * 3. Decoder targets
 */
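
/*
 * Illustrative flow through the sysfs interface below (a sketch, not the
 * output of a real session; "regionX" and "decoderY.Z" are placeholder
 * device names, and a x1 root decoder is assumed so that any power-of-2
 * ways/granularity combination is accepted):
 *
 *   echo $(uuidgen) > regionX/uuid                # pmem regions only
 *   echo 2 > regionX/interleave_ways
 *   echo 256 > regionX/interleave_granularity
 *   echo $((512 << 20)) > regionX/size            # 2 * SZ_256M
 *   echo decoderY.Z > regionX/target0
 *   echo decoderY.W > regionX/target1
 *   echo 1 > regionX/commit
 */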

static struct cxl_region *to_cxl_region(struct device *dev);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (cxlr->mode != CXL_DECODER_PMEM)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}

static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}

static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (uuid_equal(&p->uuid, &temp))
		goto out;

	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);

static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
					  struct cxl_region *cxlr)
{
	return xa_load(&port->regions, (unsigned long)cxlr);
}

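/*
 * Flush CPU caches for the address range claimed by @cxlr before decoder
 * programming changes. Without arch support for
 * cpu_cache_invalidate_memregion() this fails with -ENXIO (unless the
 * test-only bypass is enabled), since stale cache lines could otherwise
 * alias the new decode configuration.
 */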
static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
	if (!cpu_cache_has_invalidate_memregion()) {
		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
			dev_warn_once(
				&cxlr->dev,
				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
			return 0;
		} else {
			dev_err(&cxlr->dev,
				"Failed to synchronize CPU cache state\n");
			return -ENXIO;
		}
	}

	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
	return 0;
}

static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	/*
	 * Before region teardown, attempt to flush; if the flush fails,
	 * cancel the teardown over data consistency concerns.
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		return rc;

	for (i = count - 1; i >= 0; i--) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_port *iter = cxled_to_port(cxled);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct cxl_ep *ep;

		if (cxlds->rcd)
			goto endpoint_reset;

		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			struct cxl_region_ref *cxl_rr;
			struct cxl_decoder *cxld;

			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			if (cxld->reset)
				rc = cxld->reset(cxld);
			if (rc)
				return rc;
			set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
		}

endpoint_reset:
		rc = cxled->cxld.reset(&cxled->cxld);
		if (rc)
			return rc;
		set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
	}

	/* all decoders associated with this region have been torn down */
	clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);

	return 0;
}

static int commit_decoder(struct cxl_decoder *cxld)
{
	struct cxl_switch_decoder *cxlsd = NULL;

	if (cxld->commit)
		return cxld->commit(cxld);

	if (is_switch_decoder(&cxld->dev))
		cxlsd = to_cxl_switch_decoder(&cxld->dev);

	if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
			  "->commit() is required\n"))
		return -ENXIO;
	return 0;
}

static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_region_ref *cxl_rr;
		struct cxl_decoder *cxld;
		struct cxl_port *iter;
		struct cxl_ep *ep;

		/* commit bottom up */
		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
		     iter = to_cxl_port(iter->dev.parent)) {
			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = commit_decoder(cxld);
			if (rc)
				break;
		}

		if (rc) {
			/* programming @iter failed, teardown */
			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
				cxl_rr = cxl_rr_load(iter, cxlr);
				cxld = cxl_rr->decoder;
				if (cxld->reset)
					cxld->reset(cxld);
			}

			cxled->cxld.reset(&cxled->cxld);
			goto err;
		}
	}

	return 0;

err:
	/* undo the targets that were successfully committed */
	cxl_region_decode_reset(cxlr, i);
	return rc;
}

static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	bool commit;
	ssize_t rc;

	rc = kstrtobool(buf, &commit);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* Already in the requested state? */
	if (commit && p->state >= CXL_CONFIG_COMMIT)
		goto out;
	if (!commit && p->state < CXL_CONFIG_COMMIT)
		goto out;

	/* Not ready to commit? */
	if (commit && p->state < CXL_CONFIG_ACTIVE) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Invalidate caches before region setup to drop any speculative
	 * consumption of this address space
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		goto out;

	if (commit) {
		rc = cxl_region_decode_commit(cxlr);
		if (rc == 0)
			p->state = CXL_CONFIG_COMMIT;
	} else {
		p->state = CXL_CONFIG_RESET_PENDING;
		up_write(&cxl_region_rwsem);
		device_release_driver(&cxlr->dev);
		down_write(&cxl_region_rwsem);

		/*
		 * The lock was dropped, so revalidate that the reset is
		 * still pending.
		 */
		if (p->state == CXL_CONFIG_RESET_PENDING) {
			rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
			/*
			 * Revert to committed since there may still be active
			 * decoders associated with this region, or move forward
			 * to active to mark the reset successful
			 */
			if (rc)
				p->state = CXL_CONFIG_COMMIT;
			else
				p->state = CXL_CONFIG_ACTIVE;
		}
	}

out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}

static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(commit);

static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	/*
	 * Support tooling that expects to find a 'uuid' attribute for all
	 * regions regardless of mode.
	 */
	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0444;
	return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}

static const struct attribute_group *get_cxl_region_target_group(void);

static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	unsigned int val, save;
	int rc;
	u8 iw;

	rc = kstrtouint(buf, 0, &val);
	if (rc)
		return rc;

	rc = ways_to_eiw(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x6, and x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	save = p->interleave_ways;
	p->interleave_ways = val;
	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		p->interleave_ways = save;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);
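
/*
 * Worked examples of the power-of-2-multiple rule enforced above: with
 * a x1 or x2 host bridge interleave, valid region ways are 1/2/4/8/16
 * and 2/4/8/16 respectively; with a x3 host bridge interleave, valid
 * region ways are 3, 6, and 12 (val / cxld->interleave_ways must be a
 * power of 2, and val must divide evenly).
 */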

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}

static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	rc = granularity_to_eig(val, &ig);
	if (rc)
		return rc;

	/*
	 * When the host-bridge is interleaved, disallow region granularity !=
	 * root granularity. Regions with a granularity less than the root
	 * interleave result in needing multiple endpoints to support a single
	 * slot in the interleave (possible to support in the future). Regions
	 * with a granularity greater than the root interleave result in invalid
	 * DPA translations (invalid to support).
	 */
	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);
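
/*
 * Example of the interleaved host-bridge constraint above: if the root
 * decoder interleaves two host bridges at 1024 bytes, the region
 * granularity must also be 1024. A smaller region granularity would
 * need multiple endpoints to back a single slot of the root interleave,
 * and a larger one would yield invalid DPA translations.
 */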

static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 resource = -1ULL;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		resource = p->res->start;
	rc = sysfs_emit(buf, "%#llx\n", resource);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(resource);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);

static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_region_params *p = &cxlr->params;
	struct resource *res;
	u64 remainder = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	/* Nothing to do... */
	if (p->res && resource_size(p->res) == size)
		return 0;

	/* To change size the old size must be freed first */
	if (p->res)
		return -EBUSY;

	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
		return -EBUSY;

	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
	if (!p->interleave_ways || !p->interleave_granularity ||
	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
		return -ENXIO;

	div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
	if (remainder)
		return -EINVAL;

	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
				    dev_name(&cxlr->dev));
	if (IS_ERR(res)) {
		dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
			PTR_ERR(res));
		return PTR_ERR(res);
	}

	p->res = res;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	return 0;
}
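
/*
 * The remainder check above constrains the region size to a multiple of
 * SZ_256M * interleave_ways, e.g. 512M increments for a x2 region and
 * 768M increments for a x3 region, so every endpoint contributes an
 * equal, 256M-multiple share of the HPA space.
 */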

static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	if (device_is_registered(&cxlr->dev))
		lockdep_assert_held_write(&cxl_region_rwsem);
	if (p->res) {
		/*
		 * Autodiscovered regions may not have been able to insert their
		 * resource.
		 */
		if (p->res->parent)
			remove_resource(p->res);
		kfree(p->res);
		p->res = NULL;
	}
}

static int free_hpa(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!p->res)
		return 0;

	if (p->state >= CXL_CONFIG_ACTIVE)
		return -EBUSY;

	cxl_region_iomem_release(cxlr);
	p->state = CXL_CONFIG_IDLE;
	return 0;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	u64 val;
	int rc;

	rc = kstrtou64(buf, 0, &val);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (val)
		rc = alloc_hpa(cxlr, val);
	else
		rc = free_hpa(cxlr);
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;

	return len;
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 size = 0;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		size = resource_size(p->res);
	rc = sysfs_emit(buf, "%#llx\n", size);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(size);

static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_commit.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};

static ssize_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	cxled = p->targets[pos];
	if (!cxled)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
	up_read(&cxl_region_rwsem);

	return rc;
}

static int match_free_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;
	int *id = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	/* enforce ordered allocation */
	if (cxld->id != *id)
		return 0;

	if (!cxld->region)
		return 1;

	(*id)++;

	return 0;
}

static int match_auto_decoder(struct device *dev, void *data)
{
	struct cxl_region_params *p = data;
	struct cxl_decoder *cxld;
	struct range *r;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	r = &cxld->hpa_range;

	if (p->res && p->res->start == r->start && p->res->end == r->end)
		return 1;

	return 0;
}

static struct cxl_decoder *
cxl_region_find_decoder(struct cxl_port *port,
			struct cxl_endpoint_decoder *cxled,
			struct cxl_region *cxlr)
{
	struct device *dev;
	int id = 0;

	if (port == cxled_to_port(cxled))
		return &cxled->cxld;

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		dev = device_find_child(&port->dev, &cxlr->params,
					match_auto_decoder);
	else
		dev = device_find_child(&port->dev, &id, match_free_decoder);
	if (!dev)
		return NULL;
	/*
	 * This decoder is pinned (stays registered) as long as the endpoint
	 * decoder is registered, and endpoint decoder unregistration holds the
	 * cxl_region_rwsem over unregister events, so no need to hold on to
	 * this extra reference.
	 */
	put_device(dev);
	return to_cxl_decoder(dev);
}

static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
			  struct cxl_decoder *cxld)
{
	struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
	struct cxl_decoder *cxld_iter = rr->decoder;

	/*
	 * Allow the out of order assembly of auto-discovered regions.
	 * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
	 * in HPA order. Confirm that the decoder with the lesser HPA
	 * starting address has the lesser id.
	 */
	dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
		dev_name(&cxld->dev), cxld->id,
		dev_name(&cxld_iter->dev), cxld_iter->id);

	if (cxld_iter->id > cxld->id)
		return true;

	return false;
}

static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
		 struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_region_ref *cxl_rr, *iter;
	unsigned long index;
	int rc;

	xa_for_each(&port->regions, index, iter) {
		struct cxl_region_params *ip = &iter->region->params;

		if (!ip->res || ip->res->start < p->res->start)
			continue;

		if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
			struct cxl_decoder *cxld;

			cxld = cxl_region_find_decoder(port, cxled, cxlr);
			if (auto_order_ok(port, iter->region, cxld))
				continue;
		}
		dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
			dev_name(&port->dev),
			dev_name(&iter->region->dev), ip->res, p->res);

		return ERR_PTR(-EBUSY);
	}

	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
	if (!cxl_rr)
		return ERR_PTR(-ENOMEM);
	cxl_rr->port = port;
	cxl_rr->region = cxlr;
	cxl_rr->nr_targets = 1;
	xa_init(&cxl_rr->endpoints);

	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track region reference: %d\n",
			dev_name(&port->dev), rc);
		kfree(cxl_rr);
		return ERR_PTR(rc);
	}

	return cxl_rr;
}

static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
{
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;

	if (!cxld)
		return;

	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
	if (cxld->region == cxlr) {
		cxld->region = NULL;
		put_device(&cxlr->dev);
	}
}

static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;

	cxl_rr_free_decoder(cxl_rr);
	xa_erase(&port->regions, (unsigned long)cxlr);
	xa_destroy(&cxl_rr->endpoints);
	kfree(cxl_rr);
}

static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
			 struct cxl_endpoint_decoder *cxled)
{
	int rc;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

	if (ep) {
		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
			       GFP_KERNEL);
		if (rc)
			return rc;
	}
	cxl_rr->nr_eps++;

	if (!cxld->region) {
		cxld->region = cxlr;
		get_device(&cxlr->dev);
	}

	return 0;
}

static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
				struct cxl_endpoint_decoder *cxled,
				struct cxl_region_ref *cxl_rr)
{
	struct cxl_decoder *cxld;

	cxld = cxl_region_find_decoder(port, cxled, cxlr);
	if (!cxld) {
		dev_dbg(&cxlr->dev, "%s: no decoder available\n",
			dev_name(&port->dev));
		return -EBUSY;
	}

	if (cxld->region) {
		dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
			dev_name(&port->dev), dev_name(&cxld->dev),
			dev_name(&cxld->region->dev));
		return -EBUSY;
	}

	/*
	 * Endpoints should already match the region type, but backstop that
	 * assumption with an assertion. Switch-decoders change mapping-type
	 * based on what is mapped when they are assigned to a region.
	 */
	dev_WARN_ONCE(&cxlr->dev,
		      port == cxled_to_port(cxled) &&
			      cxld->target_type != cxlr->type,
		      "%s:%s mismatch decoder type %d -> %d\n",
		      dev_name(&cxled_to_memdev(cxled)->dev),
		      dev_name(&cxld->dev), cxld->target_type, cxlr->type);
	cxld->target_type = cxlr->type;
	cxl_rr->decoder = cxld;
	return 0;
}

/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 *
 *   - additionally allocate a decoder instance that will host @cxlr on
 *     @port
 *
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */
static int cxl_port_attach_region(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_ref *cxl_rr;
	bool nr_targets_inc = false;
	struct cxl_decoder *cxld;
	unsigned long index;
	int rc = -EBUSY;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (cxl_rr) {
		struct cxl_ep *ep_iter;
		int found = 0;

		/*
		 * Walk the existing endpoints that have been attached to
		 * @cxlr at @port and see if they share the same 'next' port
		 * in the downstream direction. I.e. endpoints that share a
		 * common upstream switch.
		 */
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter == ep)
				continue;
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}

		/*
		 * New target port, or @port is an endpoint port that always
		 * accounts its own local decode as a target.
		 */
		if (!found || !ep->next) {
			cxl_rr->nr_targets++;
			nr_targets_inc = true;
		}
	} else {
		cxl_rr = alloc_region_ref(port, cxlr, cxled);
		if (IS_ERR(cxl_rr)) {
			dev_dbg(&cxlr->dev,
				"%s: failed to allocate region reference\n",
				dev_name(&port->dev));
			return PTR_ERR(cxl_rr);
		}
		nr_targets_inc = true;

		rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
		if (rc)
			goto out_erase;
	}
	cxld = cxl_rr->decoder;

	rc = cxl_rr_ep_add(cxl_rr, cxled);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track endpoint %s:%s reference\n",
			dev_name(&port->dev), dev_name(&cxlmd->dev),
			dev_name(&cxld->dev));
		goto out_erase;
	}

	dev_dbg(&cxlr->dev,
		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
		dev_name(&cxled->cxld.dev), pos,
		ep ? ep->next ? dev_name(ep->next->uport_dev) :
				dev_name(&cxlmd->dev) :
		      "none",
		cxl_rr->nr_eps, cxl_rr->nr_targets);

	return 0;
out_erase:
	if (nr_targets_inc)
		cxl_rr->nr_targets--;
	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
	return rc;
}

static void cxl_port_detach_region(struct cxl_port *port,
				   struct cxl_region *cxlr,
				   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_ref *cxl_rr;
	struct cxl_ep *ep = NULL;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (!cxl_rr)
		return;

	/*
	 * Endpoint ports do not carry cxl_ep references, and they
	 * never target more than one endpoint by definition
	 */
	if (cxl_rr->decoder == &cxled->cxld)
		cxl_rr->nr_eps--;
	else
		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
	if (ep) {
		struct cxl_ep *ep_iter;
		unsigned long index;
		int found = 0;

		cxl_rr->nr_eps--;
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}
		if (!found)
			cxl_rr->nr_targets--;
	}

	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
}

static int check_last_peer(struct cxl_endpoint_decoder *cxled,
			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
			   int distance)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled_peer;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_memdev *cxlmd_peer;
	struct cxl_ep *ep_peer;
	int pos = cxled->pos;

	/*
	 * If this position wants to share a dport with the last endpoint mapped
	 * then that endpoint, at index 'position - distance', must also be
	 * mapped by this dport.
	 */
	if (pos < distance) {
		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxled_peer = p->targets[pos - distance];
	cxlmd_peer = cxled_to_memdev(cxled_peer);
	ep_peer = cxl_ep_load(port, cxlmd_peer);
	if (ep->dport != ep_peer->dport) {
		dev_dbg(&cxlr->dev,
			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
			dev_name(&cxlmd_peer->dev),
			dev_name(&cxled_peer->cxld.dev));
		return -ENXIO;
	}

	return 0;
}
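
/*
 * Example of the 'distance' rule enforced above: in a 4-way region where
 * this port's decoder fans out to 2 dports, distance = 4 / 2 = 2, so the
 * endpoint at position 3 may share a dport only with the endpoint at
 * position 1.
 */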

static int cxl_port_setup_targets(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_switch_decoder *cxlsd;
	u16 eig, peig;
	u8 eiw, peiw;

	/*
	 * While root level decoders support x3, x6, x12, switch level
	 * decoders only support powers of 2 up to x16.
	 */
	if (!is_power_of_2(cxl_rr->nr_targets)) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			cxl_rr->nr_targets);
		return -EINVAL;
	}

	cxlsd = to_cxl_switch_decoder(&cxld->dev);
	if (cxl_rr->nr_targets_set) {
		int i, distance;

		/*
		 * Passthrough decoders impose no distance requirements between
		 * peers
		 */
		if (cxl_rr->nr_targets == 1)
			distance = 0;
		else
			distance = p->nr_targets / cxl_rr->nr_targets;
		for (i = 0; i < cxl_rr->nr_targets_set; i++)
			if (ep->dport == cxlsd->target[i]) {
				rc = check_last_peer(cxled, ep, cxl_rr,
						     distance);
				if (rc)
					return rc;
				goto out_target_set;
			}
		goto add_target;
	}

	if (is_cxl_root(parent_port)) {
		/*
		 * Root decoder IG is always set to value in CFMWS which
		 * may be different than this region's IG. We can use the
		 * region's IG here since interleave_granularity_store()
		 * does not allow interleaved host-bridges with
		 * root IG != region IG.
		 */
		parent_ig = p->interleave_granularity;
		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
		/*
		 * For purposes of address bit routing, use power-of-2 math for
		 * switch ports.
		 */
		if (!is_power_of_2(parent_iw))
			parent_iw /= 3;
	} else {
		struct cxl_region_ref *parent_rr;
		struct cxl_decoder *parent_cxld;

		parent_rr = cxl_rr_load(parent_port, cxlr);
		parent_cxld = parent_rr->decoder;
		parent_ig = parent_cxld->interleave_granularity;
		parent_iw = parent_cxld->interleave_ways;
	}

	rc = granularity_to_eig(parent_ig, &peig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_ig);
		return rc;
	}

	rc = ways_to_eiw(parent_iw, &peiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_iw);
		return rc;
	}

	iw = cxl_rr->nr_targets;
	rc = ways_to_eiw(iw, &eiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev), iw);
		return rc;
	}

	/*
	 * Interleave granularity is a multiple of @parent_port granularity.
	 * Multiplier is the parent port interleave ways.
	 */
	rc = granularity_to_eig(parent_ig * parent_iw, &eig);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: invalid granularity calculation (%d * %d)\n",
			dev_name(&parent_port->dev), parent_ig, parent_iw);
		return rc;
	}

	rc = eig_to_granularity(eig, &ig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			256 << eig);
		return rc;
	}

	if (iw > 8 || iw > cxlsd->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s:%s: ways: %d overflows targets: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxld->dev), iw, cxlsd->nr_targets);
		return -ENXIO;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxld->interleave_ways != iw ||
		    cxld->interleave_granularity != ig ||
		    cxld->hpa_range.start != p->res->start ||
		    cxld->hpa_range.end != p->res->end ||
		    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
			dev_err(&cxlr->dev,
				"%s:%s %s expected iw: %d ig: %d %pr\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, iw, ig, p->res);
			dev_err(&cxlr->dev,
				"%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, cxld->interleave_ways,
				cxld->interleave_granularity,
				(cxld->flags & CXL_DECODER_F_ENABLE) ?
					"enabled" :
					"disabled",
				cxld->hpa_range.start, cxld->hpa_range.end);
			return -ENXIO;
		}
	} else {
		cxld->interleave_ways = iw;
		cxld->interleave_granularity = ig;
		cxld->hpa_range = (struct range) {
			.start = p->res->start,
			.end = p->res->end,
		};
	}
	dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
		dev_name(&port->dev), iw, ig);
add_target:
	if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s: targets full trying to add %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
			dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				dev_name(&cxlsd->cxld.dev),
				dev_name(ep->dport->dport_dev),
				cxl_rr->nr_targets_set);
			return -ENXIO;
		}
	} else
		cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
	inc = 1;
out_target_set:
	cxl_rr->nr_targets_set += inc;
	dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);

	return 0;
}
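
/*
 * Example of the granularity math in cxl_port_setup_targets(): a port
 * whose parent decodes x2 at a granularity of 256 bytes programs its own
 * decoder at 256 * 2 = 512 bytes, because the parent has already
 * consumed the lower interleave address bit before routing here.
 */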

static void cxl_port_reset_targets(struct cxl_port *port,
				   struct cxl_region *cxlr)
{
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_decoder *cxld;

	/*
	 * After the last endpoint has been detached the entire cxl_rr may now
	 * be gone.
	 */
	if (!cxl_rr)
		return;
	cxl_rr->nr_targets_set = 0;

	cxld = cxl_rr->decoder;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};
}

static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;
	int i;

	/*
	 * In the auto-discovery case skip automatic teardown since the
	 * address space is already active
	 */
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		return;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		if (cxlds->rcd)
			continue;

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
			cxl_port_reset_targets(iter, cxlr);
	}
}

static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	int i, rc, rch = 0, vh = 0;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		/* validate that all targets agree on topology */
		if (!cxlds->rcd) {
			vh++;
		} else {
			rch++;
			continue;
		}

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		/*
		 * Descend the topology tree programming / validating
		 * targets while looking for conflicts.
		 */
		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			rc = cxl_port_setup_targets(iter, cxlr, cxled);
			if (rc) {
				cxl_region_teardown_targets(cxlr);
				return rc;
			}
		}
	}

	if (rch && vh) {
		dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
		cxl_region_teardown_targets(cxlr);
		return -ENXIO;
	}

	return 0;
}

static int cxl_region_validate_position(struct cxl_region *cxlr,
					struct cxl_endpoint_decoder *cxled,
					int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	if (pos < 0 || pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		return -ENXIO;
	}

	if (p->targets[pos] == cxled)
		return 0;

	if (p->targets[pos]) {
		struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
		struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);

		dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
			pos, dev_name(&cxlmd_target->dev),
			dev_name(&cxled_target->cxld.dev));
		return -EBUSY;
	}

	for (i = 0; i < p->interleave_ways; i++) {
		struct cxl_endpoint_decoder *cxled_target;
		struct cxl_memdev *cxlmd_target;

		cxled_target = p->targets[i];
		if (!cxled_target)
			continue;

		cxlmd_target = cxled_to_memdev(cxled_target);
		if (cxlmd_target == cxlmd) {
			dev_dbg(&cxlr->dev,
				"%s already specified at position %d via: %s\n",
				dev_name(&cxlmd->dev), pos,
				dev_name(&cxled_target->cxld.dev));
			return -EBUSY;
		}
	}

	return 0;
}

static int cxl_region_attach_position(struct cxl_region *cxlr,
				      struct cxl_root_decoder *cxlrd,
				      struct cxl_endpoint_decoder *cxled,
				      const struct cxl_dport *dport, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *iter;
	int rc;

	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(&cxlrd->cxlsd.cxld.dev));
		return -ENXIO;
	}

	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent)) {
		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
		if (rc)
			goto err;
	}

	return 0;

err:
	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);
	return rc;
}

static int cxl_region_attach_auto(struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_region_params *p = &cxlr->params;

	if (cxled->state != CXL_DECODER_STATE_AUTO) {
		dev_err(&cxlr->dev,
			"%s: unable to add decoder to autodetected region\n",
			dev_name(&cxled->cxld.dev));
		return -EINVAL;
	}

	if (pos >= 0) {
		dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
			dev_name(&cxled->cxld.dev), pos);
		return -EINVAL;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_err(&cxlr->dev, "%s: no more target slots available\n",
			dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	/*
	 * Temporarily record the endpoint decoder into the target array. Yes,
	 * this means that userspace can view devices in the wrong position
	 * before the region activates, and must be careful to understand when
	 * it might be racing region autodiscovery.
	 */
	pos = p->nr_targets;
	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	return 0;
}

static int cmp_interleave_pos(const void *a, const void *b)
{
	struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
	struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;

	return cxled_a->pos - cxled_b->pos;
}

static struct cxl_port *next_port(struct cxl_port *port)
{
	if (!port->parent_dport)
		return NULL;
	return port->parent_dport->port;
}

static int match_switch_decoder_by_range(struct device *dev, void *data)
{
	struct cxl_switch_decoder *cxlsd;
	struct range *r1, *r2 = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxlsd = to_cxl_switch_decoder(dev);
	r1 = &cxlsd->cxld.hpa_range;

	if (is_root_decoder(dev))
		return range_contains(r1, r2);
	return (r1->start == r2->start && r1->end == r2->end);
}

static int find_pos_and_ways(struct cxl_port *port, struct range *range,
			     int *pos, int *ways)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *parent;
	struct device *dev;
	int rc = -ENXIO;

	parent = next_port(port);
	if (!parent)
		return rc;

	dev = device_find_child(&parent->dev, range,
				match_switch_decoder_by_range);
	if (!dev) {
		dev_err(port->uport_dev,
			"failed to find decoder mapping %#llx-%#llx\n",
			range->start, range->end);
		return rc;
	}
	cxlsd = to_cxl_switch_decoder(dev);
	*ways = cxlsd->cxld.interleave_ways;

	for (int i = 0; i < *ways; i++) {
		if (cxlsd->target[i] == port->parent_dport) {
			*pos = i;
			rc = 0;
			break;
		}
	}
	put_device(dev);

	return rc;
}

/**
 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
 * @cxled: endpoint decoder member of given region
 *
 * The endpoint position is calculated by traversing the topology from
 * the endpoint to the root decoder and iteratively applying this
 * calculation:
 *
 *    position = position * parent_ways + parent_pos;
 *
 * ...where @position is inferred from switch and root decoder target lists.
 *
 * Return: position >= 0 on success
 *	   -ENXIO on failure
 */
static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *port = cxled_to_port(cxled);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range *range = &cxled->cxld.hpa_range;
	int parent_ways = 0, parent_pos = 0, pos = 0;
	int rc;

	/*
	 * Example: the expected interleave order of the 4-way region shown
	 * below is: mem0, mem2, mem1, mem3
	 *
	 *                 root_port
	 *                /         \
	 *     host_bridge_0       host_bridge_1
	 *       |        |          |        |
	 *      mem0     mem1       mem2     mem3
	 *
	 * In the example the calculator will iterate twice. The first iteration
	 * uses the mem position in the host-bridge and the ways of the host-
	 * bridge to generate the first, or local, position. The second
	 * iteration uses the host-bridge position in the root_port and the ways
	 * of the root_port to refine the position.
	 *
	 * A trace of the calculation per endpoint looks like this:
	 * mem0: pos = 0 * 2 + 0    mem2: pos = 0 * 2 + 0
	 *       pos = 0 * 2 + 0          pos = 0 * 2 + 1
	 *       pos: 0                   pos: 1
	 *
	 * mem1: pos = 0 * 2 + 1    mem3: pos = 0 * 2 + 1
	 *       pos = 1 * 2 + 0          pos = 1 * 2 + 1
	 *       pos: 2                   pos: 3
	 *
	 * Note that while this example is simple, the method applies to more
	 * complex topologies, including those with switches.
	 */

	/* Iterate from endpoint to root_port refining the position */
	for (iter = port; iter; iter = next_port(iter)) {
		if (is_cxl_root(iter))
			break;

		rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
		if (rc)
			return rc;

		pos = pos * parent_ways + parent_pos;
	}

	dev_dbg(&cxlmd->dev,
		"decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
		dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
		dev_name(&port->dev), range->start, range->end, pos);

	return pos;
}

static int cxl_region_sort_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		cxled->pos = cxl_calc_interleave_pos(cxled);
		/*
		 * Record that sorting failed, but still continue to calc
		 * cxled->pos so that follow-on code paths can reliably
		 * do p->targets[cxled->pos] to self-reference their entry.
		 */
		if (cxled->pos < 0)
			rc = -ENXIO;
	}
	/* Keep the cxlr target list in interleave position order */
	sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
	     cmp_interleave_pos, NULL);

	dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
	return rc;
}

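/*
 * cxl_region_attach() - add @cxled to @cxlr at interleave position @pos
 *
 * For user-defined regions the position is validated, region references
 * are established at each port from the endpoint up to the root, and once
 * the final target arrives the HDM decoder targets are programmed and the
 * region becomes CXL_CONFIG_ACTIVE. For auto-discovered regions
 * (CXL_REGION_F_AUTO) @pos is expected to be negative; positions are
 * instead calculated from the existing decoder programming, and the
 * region moves straight to CXL_CONFIG_COMMIT once all targets validate.
 */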
static int cxl_region_attach(struct cxl_region *cxlr,
			     struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_port *ep_port, *root_port;
	struct cxl_dport *dport;
	int rc = -ENXIO;

	if (cxled->mode != cxlr->mode) {
		dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
			dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
		return -EINVAL;
	}

	if (cxled->mode == CXL_DECODER_DEAD) {
		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
		return -ENODEV;
	}

	/* all full of members, or interleave config not established? */
	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "region already active\n");
		return -EBUSY;
	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "interleave config missing\n");
		return -ENXIO;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
			p->nr_targets);
		return -EINVAL;
	}

	ep_port = cxled_to_port(cxled);
	root_port = cxlrd_to_port(cxlrd);
	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
	if (!dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(cxlr->dev.parent));
		return -ENXIO;
	}

	if (cxled->cxld.target_type != cxlr->type) {
		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			cxled->cxld.target_type, cxlr->type);
		return -ENXIO;
	}

	if (!cxled->dpa_res) {
		dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
	    resource_size(p->res)) {
		dev_dbg(&cxlr->dev,
			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
			(u64)resource_size(p->res));
		return -EINVAL;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		int i;

		rc = cxl_region_attach_auto(cxlr, cxled, pos);
		if (rc)
			return rc;

		/* await more targets to arrive... */
		if (p->nr_targets < p->interleave_ways)
			return 0;

		/*
		 * All targets are here, which implies all PCI enumeration that
		 * affects this region has been completed. Walk the topology to
		 * sort the devices into their relative region decode position.
		 */
		rc = cxl_region_sort_targets(cxlr);
		if (rc)
			return rc;

		for (i = 0; i < p->nr_targets; i++) {
			cxled = p->targets[i];
			ep_port = cxled_to_port(cxled);
			dport = cxl_find_dport_by_dev(root_port,
						      ep_port->host_bridge);
			rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
							dport, i);
			if (rc)
				return rc;
		}

		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			return rc;

		/*
		 * If target setup succeeds in the autodiscovery case
		 * then the region is already committed.
		 */
		p->state = CXL_CONFIG_COMMIT;

		return 0;
	}

	rc = cxl_region_validate_position(cxlr, cxled, pos);
	if (rc)
		return rc;

	rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
	if (rc)
		return rc;

	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	if (p->nr_targets == p->interleave_ways) {
		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			return rc;
		p->state = CXL_CONFIG_ACTIVE;
	}

	cxled->cxld.interleave_ways = p->interleave_ways;
	cxled->cxld.interleave_granularity = p->interleave_granularity;
	cxled->cxld.hpa_range = (struct range) {
		.start = p->res->start,
		.end = p->res->end,
	};

	if (p->nr_targets != p->interleave_ways)
		return 0;

	/*
	 * Test the auto-discovery position calculator function
	 * against this successfully created user-defined region.
	 * A fail message here means that this interleave config
	 * will fail when presented as CXL_REGION_F_AUTO.
	 */
	for (int i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		int test_pos;

		test_pos = cxl_calc_interleave_pos(cxled);
		dev_dbg(&cxled->cxld.dev,
			"Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
			(test_pos == cxled->pos) ? "success" : "fail",
			test_pos, cxled->pos);
	}

	return 0;
}
1846
cxl_region_detach(struct cxl_endpoint_decoder * cxled)1847 static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
1848 {
1849 struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
1850 struct cxl_region *cxlr = cxled->cxld.region;
1851 struct cxl_region_params *p;
1852 int rc = 0;
1853
1854 lockdep_assert_held_write(&cxl_region_rwsem);
1855
1856 if (!cxlr)
1857 return 0;
1858
1859 p = &cxlr->params;
1860 get_device(&cxlr->dev);
1861
1862 if (p->state > CXL_CONFIG_ACTIVE) {
1863 /*
1864 * TODO: tear down all impacted regions if a device is
1865 * removed out of order
1866 */
1867 rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
1868 if (rc)
1869 goto out;
1870 p->state = CXL_CONFIG_ACTIVE;
1871 }
1872
1873 for (iter = ep_port; !is_cxl_root(iter);
1874 iter = to_cxl_port(iter->dev.parent))
1875 cxl_port_detach_region(iter, cxlr, cxled);
1876
1877 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
1878 p->targets[cxled->pos] != cxled) {
1879 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1880
1881 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
1882 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1883 cxled->pos);
1884 goto out;
1885 }
1886
1887 if (p->state == CXL_CONFIG_ACTIVE) {
1888 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
1889 cxl_region_teardown_targets(cxlr);
1890 }
1891 p->targets[cxled->pos] = NULL;
1892 p->nr_targets--;
1893 cxled->cxld.hpa_range = (struct range) {
1894 .start = 0,
1895 .end = -1,
1896 };
1897
1898 /* notify the region driver that one of its targets has departed */
1899 up_write(&cxl_region_rwsem);
1900 device_release_driver(&cxlr->dev);
1901 down_write(&cxl_region_rwsem);
1902 out:
1903 put_device(&cxlr->dev);
1904 return rc;
1905 }
1906
cxl_decoder_kill_region(struct cxl_endpoint_decoder * cxled)1907 void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
1908 {
1909 down_write(&cxl_region_rwsem);
1910 cxled->mode = CXL_DECODER_DEAD;
1911 cxl_region_detach(cxled);
1912 up_write(&cxl_region_rwsem);
1913 }
1914
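/*
 * Editorial note, inferred from the code below: lock ordering is always
 * cxl_region_rwsem before cxl_dpa_rwsem. attach_target() holds the region
 * rwsem for write to serialize region configuration, and the DPA rwsem for
 * read so the endpoint decoder's DPA allocation cannot change while
 * cxl_region_attach() validates it.
 */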
static int attach_target(struct cxl_region *cxlr,
			 struct cxl_endpoint_decoder *cxled, int pos,
			 unsigned int state)
{
	int rc = 0;

	if (state == TASK_INTERRUPTIBLE)
		rc = down_write_killable(&cxl_region_rwsem);
	else
		down_write(&cxl_region_rwsem);
	if (rc)
		return rc;

	down_read(&cxl_dpa_rwsem);
	rc = cxl_region_attach(cxlr, cxled, pos);
	up_read(&cxl_dpa_rwsem);
	up_write(&cxl_region_rwsem);
	return rc;
}

static int detach_target(struct cxl_region *cxlr, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	if (!p->targets[pos]) {
		rc = 0;
		goto out;
	}

	rc = cxl_region_detach(p->targets[pos]);
out:
	up_write(&cxl_region_rwsem);
	return rc;
}

static ssize_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
			     size_t len)
{
	int rc;

	if (sysfs_streq(buf, "\n"))
		rc = detach_target(cxlr, pos);
	else {
		struct device *dev;

		dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
		if (!dev)
			return -ENODEV;

		if (!is_endpoint_decoder(dev)) {
			rc = -EINVAL;
			goto out;
		}

		rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
				   TASK_INTERRUPTIBLE);
out:
		put_device(dev);
	}

	if (rc < 0)
		return rc;
	return len;
}
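
/*
 * Example usage (device and region names are illustrative): bind an endpoint
 * decoder to interleave position 0 of a region, then detach it by writing an
 * empty string:
 *
 *	# echo decoder5.0 > /sys/bus/cxl/devices/region0/target0
 *	# echo "" > /sys/bus/cxl/devices/region0/target0
 */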

#define TARGET_ATTR_RW(n)                                              \
static ssize_t target##n##_show(                                       \
	struct device *dev, struct device_attribute *attr, char *buf)  \
{                                                                      \
	return show_targetN(to_cxl_region(dev), buf, (n));             \
}                                                                      \
static ssize_t target##n##_store(struct device *dev,                   \
				 struct device_attribute *attr,        \
				 const char *buf, size_t len)          \
{                                                                      \
	return store_targetN(to_cxl_region(dev), buf, (n), len);       \
}                                                                      \
static DEVICE_ATTR_RW(target##n)
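
/*
 * For reference, TARGET_ATTR_RW(0) expands to roughly:
 *
 *	static ssize_t target0_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		return show_targetN(to_cxl_region(dev), buf, (0));
 *	}
 *	static ssize_t target0_store(struct device *dev,
 *				     struct device_attribute *attr,
 *				     const char *buf, size_t len)
 *	{
 *		return store_targetN(to_cxl_region(dev), buf, (0), len);
 *	}
 *	static DEVICE_ATTR_RW(target0);
 */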

TARGET_ATTR_RW(0);
TARGET_ATTR_RW(1);
TARGET_ATTR_RW(2);
TARGET_ATTR_RW(3);
TARGET_ATTR_RW(4);
TARGET_ATTR_RW(5);
TARGET_ATTR_RW(6);
TARGET_ATTR_RW(7);
TARGET_ATTR_RW(8);
TARGET_ATTR_RW(9);
TARGET_ATTR_RW(10);
TARGET_ATTR_RW(11);
TARGET_ATTR_RW(12);
TARGET_ATTR_RW(13);
TARGET_ATTR_RW(14);
TARGET_ATTR_RW(15);

static struct attribute *target_attrs[] = {
	&dev_attr_target0.attr,
	&dev_attr_target1.attr,
	&dev_attr_target2.attr,
	&dev_attr_target3.attr,
	&dev_attr_target4.attr,
	&dev_attr_target5.attr,
	&dev_attr_target6.attr,
	&dev_attr_target7.attr,
	&dev_attr_target8.attr,
	&dev_attr_target9.attr,
	&dev_attr_target10.attr,
	&dev_attr_target11.attr,
	&dev_attr_target12.attr,
	&dev_attr_target13.attr,
	&dev_attr_target14.attr,
	&dev_attr_target15.attr,
	NULL,
};

static umode_t cxl_region_target_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;

	if (n < p->interleave_ways)
		return a->mode;
	return 0;
}
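
/*
 * Example: a region configured with interleave_ways == 2 exposes only
 * target0 and target1 in sysfs; the remaining targetN attributes are hidden
 * (mode 0). construct_region() re-evaluates this visibility via
 * sysfs_update_group() once the interleave geometry is known.
 */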

static const struct attribute_group cxl_region_target_group = {
	.attrs = target_attrs,
	.is_visible = cxl_region_target_visible,
};

static const struct attribute_group *get_cxl_region_target_group(void)
{
	return &cxl_region_target_group;
}

static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	&cxl_region_target_group,
	NULL,
};

static void cxl_region_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_region *cxlr = to_cxl_region(dev);
	int id = atomic_read(&cxlrd->region_id);

	/*
	 * Try to reuse the recently idled id rather than the cached
	 * next id to prevent the region id space from increasing
	 * unnecessarily.
	 */
	if (cxlr->id < id)
		if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
			memregion_free(id);
			goto out;
		}

	memregion_free(cxlr->id);
out:
	put_device(dev->parent);
	kfree(cxlr);
}

const struct device_type cxl_region_type = {
	.name = "cxl_region",
	.release = cxl_region_release,
	.groups = region_groups
};

bool is_cxl_region(struct device *dev)
{
	return dev->type == &cxl_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);

static struct cxl_region *to_cxl_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
			  "not a cxl_region device\n"))
		return NULL;

	return container_of(dev, struct cxl_region, dev);
}

static void unregister_region(void *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	device_del(dev);

	/*
	 * Now that region sysfs is shut down, the parameter block is
	 * read-only, so there is no need to hold the region rwsem to
	 * access the region parameters.
	 */
	for (i = 0; i < p->interleave_ways; i++)
		detach_target(cxlr, i);

	cxl_region_iomem_release(cxlr);
	put_device(dev);
}

static struct lock_class_key cxl_region_key;

static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
{
	struct cxl_region *cxlr;
	struct device *dev;

	cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
	if (!cxlr) {
		memregion_free(id);
		return ERR_PTR(-ENOMEM);
	}

	dev = &cxlr->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_region_key);
	dev->parent = &cxlrd->cxlsd.cxld.dev;
	/*
	 * Keep root decoder pinned through cxl_region_release to fixup
	 * region id allocations
	 */
	get_device(dev->parent);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_region_type;
	cxlr->id = id;

	return cxlr;
}

/**
 * devm_cxl_add_region - Adds a region to a decoder
 * @cxlrd: root decoder
 * @id: memregion id to create; freed via memregion_free() on failure
 * @mode: mode for the endpoint decoders of this region
 * @type: select whether this is an expander or accelerator (type-2 or type-3)
 *
 * This is the second step of region initialization. Regions exist within an
 * address space which is mapped by a @cxlrd.
 *
 * Return: 0 if the region was added to the @cxlrd, else returns negative error
 * code. The region will be named "regionZ" where Z is the unique region number.
 */
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
					      int id,
					      enum cxl_decoder_mode mode,
					      enum cxl_decoder_type type)
{
	struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
	struct cxl_region *cxlr;
	struct device *dev;
	int rc;

	cxlr = cxl_region_alloc(cxlrd, id);
	if (IS_ERR(cxlr))
		return cxlr;
	cxlr->mode = mode;
	cxlr->type = type;

	dev = &cxlr->dev;
	rc = dev_set_name(dev, "region%d", id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
	if (rc)
		return ERR_PTR(rc);

	dev_dbg(port->uport_dev, "%s: created %s\n",
		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
	return cxlr;

err:
	put_device(dev);
	return ERR_PTR(rc);
}

static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
{
	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
}

static ssize_t create_pmem_region_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static ssize_t create_ram_region_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
					  enum cxl_decoder_mode mode, int id)
{
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
		return ERR_PTR(-EINVAL);
	}

	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0)
		return ERR_PTR(rc);

	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
		memregion_free(rc);
		return ERR_PTR(-EBUSY);
	}

	return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}

static ssize_t create_pmem_region_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_pmem_region);
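
/*
 * Example flow (paths are illustrative): read the root decoder's next
 * available region name, then write it back to claim it:
 *
 *	# cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *	region0
 *	# echo region0 > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *
 * A racing writer that echoes a stale name gets -EBUSY from the
 * atomic_cmpxchg() in __create_region().
 */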

static ssize_t create_ram_region_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_ram_region);

static ssize_t region_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (cxld->region)
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
	else
		rc = sysfs_emit(buf, "\n");
	up_read(&cxl_region_rwsem);

	return rc;
}
DEVICE_ATTR_RO(region);

static struct cxl_region *
cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *region_dev;

	region_dev = device_find_child_by_name(&cxld->dev, name);
	if (!region_dev)
		return ERR_PTR(-ENODEV);

	return to_cxl_region(region_dev);
}

static ssize_t delete_region_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_region *cxlr;

	cxlr = cxl_find_region_by_name(cxlrd, buf);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	devm_release_action(port->uport_dev, unregister_region, cxlr);
	put_device(&cxlr->dev);

	return len;
}
DEVICE_ATTR_WO(delete_region);
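
/*
 * Example (illustrative): tear down a region from the root decoder that
 * hosts it:
 *
 *	# echo region0 > /sys/bus/cxl/devices/decoder0.0/delete_region
 */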

static void cxl_pmem_region_release(struct device *dev)
{
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	int i;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;

		put_device(&cxlmd->dev);
	}

	kfree(cxlr_pmem);
}

static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};

bool is_cxl_pmem_region(struct device *dev)
{
	return dev->type == &cxl_pmem_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);

struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
			  "not a cxl_pmem_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_pmem_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);

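/*
 * Editorial note: @offset tracks the first DPA past the last committed
 * endpoint decoder, and @mode records that decoder's mode, so that
 * cxl_get_poison_unmapped() knows which remaining ranges to scan.
 */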
struct cxl_poison_context {
	struct cxl_port *port;
	enum cxl_decoder_mode mode;
	u64 offset;
};

static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
				   struct cxl_poison_context *ctx)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u64 offset, length;
	int rc = 0;

	/*
	 * Collect poison for the remaining unmapped resources
	 * after poison is collected by committed endpoints.
	 *
	 * Knowing that PMEM must always follow RAM, get poison
	 * for unmapped resources based on the last decoder's mode:
	 *	ram: scan remains of ram range, then any pmem range
	 *	pmem: scan remains of pmem range
	 */

	if (ctx->mode == CXL_DECODER_RAM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->ram_res) - offset;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT)
			rc = 0;
		if (rc)
			return rc;
	}
	if (ctx->mode == CXL_DECODER_PMEM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->dpa_res) - offset;
		if (!length)
			return 0;
	} else if (resource_size(&cxlds->pmem_res)) {
		offset = cxlds->pmem_res.start;
		length = resource_size(&cxlds->pmem_res);
	} else {
		return 0;
	}

	return cxl_mem_get_poison(cxlmd, offset, length, NULL);
}
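
/*
 * Worked example (hypothetical layout): for a device with ram_res spanning
 * DPA 0..1G and pmem_res spanning 1G..2G, where the last committed decoder
 * was ram-mode and ended at 512M, ctx->offset is 512M. The function above
 * then scans 512M..1G of ram and falls through to scan the full pmem range.
 */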

static int poison_by_decoder(struct device *dev, void *arg)
{
	struct cxl_poison_context *ctx = arg;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_memdev *cxlmd;
	u64 offset, length;
	int rc = 0;

	if (!is_endpoint_decoder(dev))
		return rc;

	cxled = to_cxl_endpoint_decoder(dev);
	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
		return rc;

	/*
	 * Regions are only created with single-mode decoders: pmem or ram.
	 * Linux does not support mixed-mode decoders. This means that
	 * reading poison per endpoint decoder adheres to the requirement
	 * that poison reads of pmem and ram must be separated.
	 * CXL 3.0 Spec 8.2.9.8.4.1
	 */
	if (cxled->mode == CXL_DECODER_MIXED) {
		dev_dbg(dev, "poison list read unsupported in mixed mode\n");
		return rc;
	}

	cxlmd = cxled_to_memdev(cxled);
	if (cxled->skip) {
		offset = cxled->dpa_res->start - cxled->skip;
		length = cxled->skip;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
			rc = 0;
		if (rc)
			return rc;
	}

	offset = cxled->dpa_res->start;
	length = cxled->dpa_res->end - offset + 1;
	rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
	if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
		rc = 0;
	if (rc)
		return rc;

	/* Iterate until commit_end is reached */
	if (cxled->cxld.id == ctx->port->commit_end) {
		ctx->offset = cxled->dpa_res->end + 1;
		ctx->mode = cxled->mode;
		return 1;
	}

	return 0;
}
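
/*
 * Note: returning 1 from poison_by_decoder() stops device_for_each_child()
 * early; cxl_get_poison_by_endpoint() below treats that as "reached the last
 * committed decoder" and finishes the scan with cxl_get_poison_unmapped().
 */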

int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
	struct cxl_poison_context ctx;
	int rc = 0;

	ctx = (struct cxl_poison_context) {
		.port = port
	};

	rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
	if (rc == 1)
		rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
					     &ctx);

	return rc;
}

static struct lock_class_key cxl_pmem_region_key;

static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct cxl_pmem_region *cxlr_pmem;
	struct device *dev;
	int i;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_pmem = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
			    GFP_KERNEL);
	if (!cxlr_pmem) {
		cxlr_pmem = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_pmem->hpa_range.start = p->res->start;
	cxlr_pmem->hpa_range.end = p->res->end;

	/* Snapshot the region configuration underneath the cxl_region_rwsem */
	cxlr_pmem->nr_mappings = p->nr_targets;
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];

		/*
		 * Regions never span CXL root devices, so by definition the
		 * bridge for one device is the same for all.
		 */
		if (i == 0) {
			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
			if (!cxl_nvb) {
				kfree(cxlr_pmem);
				cxlr_pmem = ERR_PTR(-ENODEV);
				goto out;
			}
			cxlr->cxl_nvb = cxl_nvb;
		}
		m->cxlmd = cxlmd;
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}

	dev = &cxlr_pmem->dev;
	cxlr_pmem->cxlr = cxlr;
	cxlr->cxlr_pmem = cxlr_pmem;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_pmem;
}

static void cxl_dax_region_release(struct device *dev)
{
	struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);

	kfree(cxlr_dax);
}

static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_dax_region_type = {
	.name = "cxl_dax_region",
	.release = cxl_dax_region_release,
	.groups = cxl_dax_region_attribute_groups,
};

static bool is_cxl_dax_region(struct device *dev)
{
	return dev->type == &cxl_dax_region_type;
}

struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
			  "not a cxl_dax_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_dax_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);

static struct lock_class_key cxl_dax_region_key;

static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_dax = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
	if (!cxlr_dax) {
		cxlr_dax = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_dax->hpa_range.start = p->res->start;
	cxlr_dax->hpa_range.end = p->res->end;

	dev = &cxlr_dax->dev;
	cxlr_dax->cxlr = cxlr;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_dax_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_dax;
}

static void cxlr_pmem_unregister(void *_cxlr_pmem)
{
	struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	/*
	 * Either the bridge is in ->remove() context under the device_lock(),
	 * or cxlr_release_nvdimm() is cancelling the bridge's release action
	 * for @cxlr_pmem and doing it itself (while manually holding the bridge
	 * lock).
	 */
	device_lock_assert(&cxl_nvb->dev);
	cxlr->cxlr_pmem = NULL;
	cxlr_pmem->cxlr = NULL;
	device_unregister(&cxlr_pmem->dev);
}

static void cxlr_release_nvdimm(void *_cxlr)
{
	struct cxl_region *cxlr = _cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (cxlr->cxlr_pmem)
		devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
				    cxlr->cxlr_pmem);
	device_unlock(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	put_device(&cxl_nvb->dev);
}

/**
 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
 * @cxlr: parent CXL region for this pmem region bridge device
 *
 * Return: 0 on success, negative error code on failure.
 */
static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
	struct cxl_pmem_region *cxlr_pmem;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
	if (IS_ERR(cxlr_pmem))
		return PTR_ERR(cxlr_pmem);
	cxl_nvb = cxlr->cxl_nvb;

	dev = &cxlr_pmem->dev;
	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	device_lock(&cxl_nvb->dev);
	if (cxl_nvb->dev.driver)
		rc = devm_add_action_or_reset(&cxl_nvb->dev,
					      cxlr_pmem_unregister, cxlr_pmem);
	else
		rc = -ENXIO;
	device_unlock(&cxl_nvb->dev);

	if (rc)
		goto err_bridge;

	/* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
	return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);

err:
	put_device(dev);
err_bridge:
	put_device(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	return rc;
}

static void cxlr_dax_unregister(void *_cxlr_dax)
{
	struct cxl_dax_region *cxlr_dax = _cxlr_dax;

	device_unregister(&cxlr_dax->dev);
}

static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
{
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;
	int rc;

	cxlr_dax = cxl_dax_region_alloc(cxlr);
	if (IS_ERR(cxlr_dax))
		return PTR_ERR(cxlr_dax);

	dev = &cxlr_dax->dev;
	rc = dev_set_name(dev, "dax_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
					cxlr_dax);
err:
	put_device(dev);
	return rc;
}

static int match_root_decoder_by_range(struct device *dev, void *data)
{
	struct range *r1, *r2 = data;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	r1 = &cxlrd->cxlsd.cxld.hpa_range;
	return range_contains(r1, r2);
}

static int match_region_by_range(struct device *dev, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct range *r = data;
	int rc = 0;

	if (!is_cxl_region(dev))
		return 0;

	cxlr = to_cxl_region(dev);
	p = &cxlr->params;

	down_read(&cxl_region_rwsem);
	if (p->res && p->res->start == r->start && p->res->end == r->end)
		rc = 1;
	up_read(&cxl_region_rwsem);

	return rc;
}

/* Establish an empty region covering the given HPA range */
static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
					   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxlrd_to_port(cxlrd);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct resource *res;
	int rc;

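	/*
	 * __create_region() returns -EBUSY when it loses the region_id
	 * cmpxchg race, so retry with a freshly sampled id until this
	 * caller wins or a real error surfaces.
	 */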
	do {
		cxlr = __create_region(cxlrd, cxled->mode,
				       atomic_read(&cxlrd->region_id));
	} while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);

	if (IS_ERR(cxlr)) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s failed assign region: %ld\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__, PTR_ERR(cxlr));
		return cxlr;
	}

	down_write(&cxl_region_rwsem);
	p = &cxlr->params;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s autodiscovery interrupted\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__);
		rc = -EBUSY;
		goto err;
	}

	set_bit(CXL_REGION_F_AUTO, &cxlr->flags);

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto err;
	}

	*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
				    dev_name(&cxlr->dev));
	rc = insert_resource(cxlrd->res, res);
	if (rc) {
		/*
		 * Platform-firmware may not have split resources like "System
		 * RAM" on CXL window boundaries, see cxl_region_iomem_release()
		 */
		dev_warn(cxlmd->dev.parent,
			 "%s:%s: %s %s cannot insert resource\n",
			 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			 __func__, dev_name(&cxlr->dev));
	}

	p->res = res;
	p->interleave_ways = cxled->cxld.interleave_ways;
	p->interleave_granularity = cxled->cxld.interleave_granularity;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		goto err;

	dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
		dev_name(&cxlr->dev), p->res, p->interleave_ways,
		p->interleave_granularity);

	/* ...to match put_device() in cxl_add_to_region() */
	get_device(&cxlr->dev);
	up_write(&cxl_region_rwsem);

	return cxlr;

err:
	up_write(&cxl_region_rwsem);
	devm_release_action(port->uport_dev, unregister_region, cxlr);
	return ERR_PTR(rc);
}

int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_decoder *cxld = &cxled->cxld;
	struct device *cxlrd_dev, *region_dev;
	struct cxl_root_decoder *cxlrd;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	bool attach = false;
	int rc;

	cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
				      match_root_decoder_by_range);
	if (!cxlrd_dev) {
		dev_err(cxlmd->dev.parent,
			"%s:%s no CXL window for range %#llx:%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxld->dev),
			cxld->hpa_range.start, cxld->hpa_range.end);
		return -ENXIO;
	}

	cxlrd = to_cxl_root_decoder(cxlrd_dev);

	/*
	 * Ensure that if multiple threads race to construct_region() for @hpa
	 * one does the construction and the others add to that.
	 */
	mutex_lock(&cxlrd->range_lock);
	region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
				       match_region_by_range);
	if (!region_dev) {
		cxlr = construct_region(cxlrd, cxled);
		region_dev = &cxlr->dev;
	} else
		cxlr = to_cxl_region(region_dev);
	mutex_unlock(&cxlrd->range_lock);

	rc = PTR_ERR_OR_ZERO(cxlr);
	if (rc)
		goto out;

	attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);

	down_read(&cxl_region_rwsem);
	p = &cxlr->params;
	attach = p->state == CXL_CONFIG_COMMIT;
	up_read(&cxl_region_rwsem);

	if (attach) {
		/*
		 * If device_attach() fails the range may still be active via
		 * the platform-firmware memory map, otherwise the driver for
		 * regions is local to this file, so driver matching can't fail.
		 */
		if (device_attach(&cxlr->dev) < 0)
			dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
				p->res);
	}

	put_device(region_dev);
out:
	put_device(cxlrd_dev);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);

static int is_system_ram(struct resource *res, void *arg)
{
	struct cxl_region *cxlr = arg;
	struct cxl_region_params *p = &cxlr->params;

	dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
	return 1;
}

static int cxl_region_probe(struct device *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc) {
		dev_dbg(&cxlr->dev, "probe interrupted\n");
		return rc;
	}

	if (p->state < CXL_CONFIG_COMMIT) {
		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
		rc = -ENXIO;
		goto out;
	}

	if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
		dev_err(&cxlr->dev,
			"failed to activate, re-commit region and retry\n");
		rc = -ENXIO;
		goto out;
	}

	/*
	 * From this point on any path that changes the region's state away from
	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
	 */
out:
	up_read(&cxl_region_rwsem);

	if (rc)
		return rc;

	switch (cxlr->mode) {
	case CXL_DECODER_PMEM:
		return devm_cxl_add_pmem_region(cxlr);
	case CXL_DECODER_RAM:
		/*
		 * The region cannot be managed by CXL if any portion of
		 * it is already online as 'System RAM'
		 */
		if (walk_iomem_res_desc(IORES_DESC_NONE,
					IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					p->res->start, p->res->end, cxlr,
					is_system_ram) > 0)
			return 0;
		return devm_cxl_add_dax_region(cxlr);
	default:
		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
			cxlr->mode);
		return -ENXIO;
	}
}

static struct cxl_driver cxl_region_driver = {
	.name = "cxl_region",
	.probe = cxl_region_probe,
	.id = CXL_DEVICE_REGION,
};

int cxl_region_init(void)
{
	return cxl_driver_register(&cxl_region_driver);
}

void cxl_region_exit(void)
{
	cxl_driver_unregister(&cxl_region_driver);
}

MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(DEVMEM);
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);