// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the active mapped capacity by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 * 3. Decoder targets
 */
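/*
 * Illustrative sketch (not a normative ABI description): userspace typically
 * assembles a persistent region in the order above via sysfs. The decoder
 * names below (decoder0.0 as a root decoder, decoder2.0 as an endpoint
 * decoder) are assumptions for the example, as is the x1 / 256MB geometry:
 *
 *   region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
 *   echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *   cd /sys/bus/cxl/devices/$region
 *   uuidgen > uuid
 *   echo 256 > interleave_granularity
 *   echo 1 > interleave_ways
 *   echo $((256 << 20)) > size
 *   echo decoder2.0 > target0
 *   echo 1 > commit
 */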

static struct cxl_region *to_cxl_region(struct device *dev);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (cxlr->mode != CXL_DECODER_PMEM)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}

static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}

static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (uuid_equal(&p->uuid, &temp))
		goto out;

	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);

static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
					  struct cxl_region *cxlr)
{
	return xa_load(&port->regions, (unsigned long)cxlr);
}

static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
	if (!cpu_cache_has_invalidate_memregion()) {
		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
			dev_warn_once(
				&cxlr->dev,
				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
			return 0;
		} else {
			dev_WARN(&cxlr->dev,
				 "Failed to synchronize CPU cache state\n");
			return -ENXIO;
		}
	}

	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
	return 0;
}

static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
	struct cxl_region_params *p = &cxlr->params;
	int i;

	/*
	 * Before region teardown attempt to flush, evict any data cached for
	 * this region, or scream loudly about missing arch / platform support
	 * for CXL teardown.
	 */
	cxl_region_invalidate_memregion(cxlr);

	for (i = count - 1; i >= 0; i--) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_port *iter = cxled_to_port(cxled);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct cxl_ep *ep;

		if (cxlds->rcd)
			goto endpoint_reset;

		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			struct cxl_region_ref *cxl_rr;
			struct cxl_decoder *cxld;

			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			if (cxld->reset)
				cxld->reset(cxld);
			set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
		}

endpoint_reset:
		cxled->cxld.reset(&cxled->cxld);
		set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
	}

	/* all decoders associated with this region have been torn down */
	clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}

static int commit_decoder(struct cxl_decoder *cxld)
{
	struct cxl_switch_decoder *cxlsd = NULL;

	if (cxld->commit)
		return cxld->commit(cxld);

	if (is_switch_decoder(&cxld->dev))
		cxlsd = to_cxl_switch_decoder(&cxld->dev);

	if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
			  "->commit() is required\n"))
		return -ENXIO;
	return 0;
}

static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_region_ref *cxl_rr;
		struct cxl_decoder *cxld;
		struct cxl_port *iter;
		struct cxl_ep *ep;

		/* commit bottom up */
		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
		     iter = to_cxl_port(iter->dev.parent)) {
			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = commit_decoder(cxld);
			if (rc)
				break;
		}

		if (rc) {
			/* programming @iter failed, teardown */
			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
				cxl_rr = cxl_rr_load(iter, cxlr);
				cxld = cxl_rr->decoder;
				if (cxld->reset)
					cxld->reset(cxld);
			}

			cxled->cxld.reset(&cxled->cxld);
			goto err;
		}
	}

	return 0;

err:
	/* undo the targets that were successfully committed */
	cxl_region_decode_reset(cxlr, i);
	return rc;
}

static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	bool commit;
	ssize_t rc;

	rc = kstrtobool(buf, &commit);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* Already in the requested state? */
	if (commit && p->state >= CXL_CONFIG_COMMIT)
		goto out;
	if (!commit && p->state < CXL_CONFIG_COMMIT)
		goto out;

	/* Not ready to commit? */
	if (commit && p->state < CXL_CONFIG_ACTIVE) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Invalidate caches before region setup to drop any speculative
	 * consumption of this address space
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		goto out;

	if (commit) {
		rc = cxl_region_decode_commit(cxlr);
		if (rc == 0)
			p->state = CXL_CONFIG_COMMIT;
	} else {
		p->state = CXL_CONFIG_RESET_PENDING;
		up_write(&cxl_region_rwsem);
		device_release_driver(&cxlr->dev);
		down_write(&cxl_region_rwsem);

		/*
		 * The lock was dropped, so need to revalidate that the reset is
		 * still pending.
		 */
		if (p->state == CXL_CONFIG_RESET_PENDING) {
			cxl_region_decode_reset(cxlr, p->interleave_ways);
			p->state = CXL_CONFIG_ACTIVE;
		}
	}

out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}

static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(commit);

static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	/*
	 * Support tooling that expects to find a 'uuid' attribute for all
	 * regions regardless of mode.
	 */
	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0444;
	return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}

static const struct attribute_group *get_cxl_region_target_group(void);

static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	unsigned int val, save;
	int rc;
	u8 iw;

	rc = kstrtouint(buf, 0, &val);
	if (rc)
		return rc;

	rc = ways_to_eiw(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x6, and x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	save = p->interleave_ways;
	p->interleave_ways = val;
	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		p->interleave_ways = save;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}

static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	rc = granularity_to_eig(val, &ig);
	if (rc)
		return rc;

	/*
	 * When the host-bridge is interleaved, disallow region granularity !=
	 * root granularity. Regions with a granularity less than the root
	 * interleave result in needing multiple endpoints to support a single
	 * slot in the interleave (possible to support in the future). Regions
	 * with a granularity greater than the root interleave result in invalid
	 * DPA translations (invalid to support).
	 */
	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);

static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 resource = -1ULL;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		resource = p->res->start;
	rc = sysfs_emit(buf, "%#llx\n", resource);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(resource);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);

static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_region_params *p = &cxlr->params;
	struct resource *res;
	u64 remainder = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	/* Nothing to do... */
	if (p->res && resource_size(p->res) == size)
		return 0;

	/* To change size the old size must be freed first */
	if (p->res)
		return -EBUSY;

	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
		return -EBUSY;

	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
	if (!p->interleave_ways || !p->interleave_granularity ||
	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
		return -ENXIO;

	div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
	if (remainder)
		return -EINVAL;

	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
				    dev_name(&cxlr->dev));
	if (IS_ERR(res)) {
		dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
			PTR_ERR(res));
		return PTR_ERR(res);
	}

	p->res = res;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	return 0;
}

static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	if (device_is_registered(&cxlr->dev))
		lockdep_assert_held_write(&cxl_region_rwsem);
	if (p->res) {
		/*
		 * Autodiscovered regions may not have been able to insert their
		 * resource.
		 */
		if (p->res->parent)
			remove_resource(p->res);
		kfree(p->res);
		p->res = NULL;
	}
}

static int free_hpa(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!p->res)
		return 0;

	if (p->state >= CXL_CONFIG_ACTIVE)
		return -EBUSY;

	cxl_region_iomem_release(cxlr);
	p->state = CXL_CONFIG_IDLE;
	return 0;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	u64 val;
	int rc;

	rc = kstrtou64(buf, 0, &val);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (val)
		rc = alloc_hpa(cxlr, val);
	else
		rc = free_hpa(cxlr);
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;

	return len;
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 size = 0;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		size = resource_size(p->res);
	rc = sysfs_emit(buf, "%#llx\n", size);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(size);

static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_commit.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};

static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	cxled = p->targets[pos];
	if (!cxled)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
	up_read(&cxl_region_rwsem);

	return rc;
}

static int match_free_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;
	int *id = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	/* enforce ordered allocation */
	if (cxld->id != *id)
		return 0;

	if (!cxld->region)
		return 1;

	(*id)++;

	return 0;
}

static int match_auto_decoder(struct device *dev, void *data)
{
	struct cxl_region_params *p = data;
	struct cxl_decoder *cxld;
	struct range *r;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	r = &cxld->hpa_range;

	if (p->res && p->res->start == r->start && p->res->end == r->end)
		return 1;

	return 0;
}

static struct cxl_decoder *
cxl_region_find_decoder(struct cxl_port *port,
			struct cxl_endpoint_decoder *cxled,
			struct cxl_region *cxlr)
{
	struct device *dev;
	int id = 0;

	if (port == cxled_to_port(cxled))
		return &cxled->cxld;

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		dev = device_find_child(&port->dev, &cxlr->params,
					match_auto_decoder);
	else
		dev = device_find_child(&port->dev, &id, match_free_decoder);
	if (!dev)
		return NULL;
	/*
	 * This decoder is pinned registered as long as the endpoint decoder is
	 * registered, and endpoint decoder unregistration holds the
	 * cxl_region_rwsem over unregister events, so no need to hold on to
	 * this extra reference.
	 */
	put_device(dev);
	return to_cxl_decoder(dev);
}

static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
			  struct cxl_decoder *cxld)
{
	struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
	struct cxl_decoder *cxld_iter = rr->decoder;

	/*
	 * Allow the out of order assembly of auto-discovered regions.
	 * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
	 * in HPA order. Confirm that the decoder with the lesser HPA
	 * starting address has the lesser id.
	 */
	dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
		dev_name(&cxld->dev), cxld->id,
		dev_name(&cxld_iter->dev), cxld_iter->id);

	if (cxld_iter->id > cxld->id)
		return true;

	return false;
}

static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
		 struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_region_ref *cxl_rr, *iter;
	unsigned long index;
	int rc;

	xa_for_each(&port->regions, index, iter) {
		struct cxl_region_params *ip = &iter->region->params;

		if (!ip->res || ip->res->start < p->res->start)
			continue;

		if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
			struct cxl_decoder *cxld;

			cxld = cxl_region_find_decoder(port, cxled, cxlr);
			if (auto_order_ok(port, iter->region, cxld))
				continue;
		}
		dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
			dev_name(&port->dev),
			dev_name(&iter->region->dev), ip->res, p->res);

		return ERR_PTR(-EBUSY);
	}

	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
	if (!cxl_rr)
		return ERR_PTR(-ENOMEM);
	cxl_rr->port = port;
	cxl_rr->region = cxlr;
	cxl_rr->nr_targets = 1;
	xa_init(&cxl_rr->endpoints);

	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track region reference: %d\n",
			dev_name(&port->dev), rc);
		kfree(cxl_rr);
		return ERR_PTR(rc);
	}

	return cxl_rr;
}

static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
{
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;

	if (!cxld)
		return;

	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
	if (cxld->region == cxlr) {
		cxld->region = NULL;
		put_device(&cxlr->dev);
	}
}

static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;

	cxl_rr_free_decoder(cxl_rr);
	xa_erase(&port->regions, (unsigned long)cxlr);
	xa_destroy(&cxl_rr->endpoints);
	kfree(cxl_rr);
}

static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
			 struct cxl_endpoint_decoder *cxled)
{
	int rc;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

	if (ep) {
		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
			       GFP_KERNEL);
		if (rc)
			return rc;
	}
	cxl_rr->nr_eps++;

	if (!cxld->region) {
		cxld->region = cxlr;
		get_device(&cxlr->dev);
	}

	return 0;
}

static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
				struct cxl_endpoint_decoder *cxled,
				struct cxl_region_ref *cxl_rr)
{
	struct cxl_decoder *cxld;

	cxld = cxl_region_find_decoder(port, cxled, cxlr);
	if (!cxld) {
		dev_dbg(&cxlr->dev, "%s: no decoder available\n",
			dev_name(&port->dev));
		return -EBUSY;
	}

	if (cxld->region) {
		dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
			dev_name(&port->dev), dev_name(&cxld->dev),
			dev_name(&cxld->region->dev));
		return -EBUSY;
	}

	/*
	 * Endpoints should already match the region type, but backstop that
	 * assumption with an assertion. Switch-decoders change mapping-type
	 * based on what is mapped when they are assigned to a region.
	 */
	dev_WARN_ONCE(&cxlr->dev,
		      port == cxled_to_port(cxled) &&
			      cxld->target_type != cxlr->type,
		      "%s:%s mismatch decoder type %d -> %d\n",
		      dev_name(&cxled_to_memdev(cxled)->dev),
		      dev_name(&cxld->dev), cxld->target_type, cxlr->type);
	cxld->target_type = cxlr->type;
	cxl_rr->decoder = cxld;
	return 0;
}

/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 *
 *   - additionally allocate a decoder instance that will host @cxlr on
 *     @port
 *
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */
static int cxl_port_attach_region(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_ref *cxl_rr;
	bool nr_targets_inc = false;
	struct cxl_decoder *cxld;
	unsigned long index;
	int rc = -EBUSY;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (cxl_rr) {
		struct cxl_ep *ep_iter;
		int found = 0;

		/*
		 * Walk the existing endpoints that have been attached to
		 * @cxlr at @port and see if they share the same 'next' port
		 * in the downstream direction. I.e. endpoints that share common
		 * upstream switch.
		 */
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter == ep)
				continue;
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}

		/*
		 * New target port, or @port is an endpoint port that always
		 * accounts its own local decode as a target.
		 */
		if (!found || !ep->next) {
			cxl_rr->nr_targets++;
			nr_targets_inc = true;
		}
	} else {
		cxl_rr = alloc_region_ref(port, cxlr, cxled);
		if (IS_ERR(cxl_rr)) {
			dev_dbg(&cxlr->dev,
				"%s: failed to allocate region reference\n",
				dev_name(&port->dev));
			return PTR_ERR(cxl_rr);
		}
		nr_targets_inc = true;

		rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
		if (rc)
			goto out_erase;
	}
	cxld = cxl_rr->decoder;

	/*
	 * the number of targets should not exceed the target_count
	 * of the decoder
	 */
	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd;

		cxlsd = to_cxl_switch_decoder(&cxld->dev);
		if (cxl_rr->nr_targets > cxlsd->nr_targets) {
			dev_dbg(&cxlr->dev,
				"%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				dev_name(&cxld->dev), dev_name(&cxlmd->dev),
				dev_name(&cxled->cxld.dev), pos,
				cxlsd->nr_targets);
			rc = -ENXIO;
			goto out_erase;
		}
	}

	rc = cxl_rr_ep_add(cxl_rr, cxled);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track endpoint %s:%s reference\n",
			dev_name(&port->dev), dev_name(&cxlmd->dev),
			dev_name(&cxld->dev));
		goto out_erase;
	}

	dev_dbg(&cxlr->dev,
		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
		dev_name(&cxled->cxld.dev), pos,
		ep ? ep->next ? dev_name(ep->next->uport_dev) :
				dev_name(&cxlmd->dev) :
			"none",
		cxl_rr->nr_eps, cxl_rr->nr_targets);

	return 0;
out_erase:
	if (nr_targets_inc)
		cxl_rr->nr_targets--;
	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
	return rc;
}

static void cxl_port_detach_region(struct cxl_port *port,
				   struct cxl_region *cxlr,
				   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_ref *cxl_rr;
	struct cxl_ep *ep = NULL;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (!cxl_rr)
		return;

	/*
	 * Endpoint ports do not carry cxl_ep references, and they
	 * never target more than one endpoint by definition
	 */
	if (cxl_rr->decoder == &cxled->cxld)
		cxl_rr->nr_eps--;
	else
		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
	if (ep) {
		struct cxl_ep *ep_iter;
		unsigned long index;
		int found = 0;

		cxl_rr->nr_eps--;
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}
		if (!found)
			cxl_rr->nr_targets--;
	}

	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
}

static int check_last_peer(struct cxl_endpoint_decoder *cxled,
			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
			   int distance)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled_peer;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_memdev *cxlmd_peer;
	struct cxl_ep *ep_peer;
	int pos = cxled->pos;

	/*
	 * If this position wants to share a dport with the last endpoint mapped
	 * then that endpoint, at index 'position - distance', must also be
	 * mapped by this dport.
	 */
	if (pos < distance) {
		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxled_peer = p->targets[pos - distance];
	cxlmd_peer = cxled_to_memdev(cxled_peer);
	ep_peer = cxl_ep_load(port, cxlmd_peer);
	if (ep->dport != ep_peer->dport) {
		dev_dbg(&cxlr->dev,
			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
			dev_name(&cxlmd_peer->dev),
			dev_name(&cxled_peer->cxld.dev));
		return -ENXIO;
	}

	return 0;
}

check_interleave_cap(struct cxl_decoder * cxld,int iw,int ig)1113843836bfSYao Xingtao static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
1114843836bfSYao Xingtao {
1115843836bfSYao Xingtao struct cxl_port *port = to_cxl_port(cxld->dev.parent);
1116843836bfSYao Xingtao struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
1117843836bfSYao Xingtao unsigned int interleave_mask;
1118843836bfSYao Xingtao u8 eiw;
1119843836bfSYao Xingtao u16 eig;
1120843836bfSYao Xingtao int high_pos, low_pos;
1121843836bfSYao Xingtao
1122843836bfSYao Xingtao if (!test_bit(iw, &cxlhdm->iw_cap_mask))
1123843836bfSYao Xingtao return -ENXIO;
1124843836bfSYao Xingtao /*
1125843836bfSYao Xingtao * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
1126843836bfSYao Xingtao * if eiw < 8:
1127843836bfSYao Xingtao * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
1128843836bfSYao Xingtao * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
1129843836bfSYao Xingtao *
1130843836bfSYao Xingtao * when eiw is 0, all the bits of HPAOFFSET[51: 0] are used and
1131843836bfSYao Xingtao * there are no interleave bits.
1132843836bfSYao Xingtao *
1133843836bfSYao Xingtao * if eiw >= 8:
1134843836bfSYao Xingtao * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
1135843836bfSYao Xingtao * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
1136843836bfSYao Xingtao *
1137843836bfSYao Xingtao * when eiw is 8, all the bits of HPAOFFSET[51: 0] are used and
1138843836bfSYao Xingtao * there are no interleave bits.
1139843836bfSYao Xingtao */
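/*
 * Worked example (hypothetical values, for illustration only): iw = 4 and
 * ig = 256 encode to eiw = 2 and eig = 0. With eiw < 8 the selector bits
 * span high_pos = eiw + eig + 7 = 9 down to low_pos = eig + 8 = 8, so
 * interleave_mask = GENMASK(9, 8), i.e. HPA bits [9:8] pick which of the
 * four targets decodes a given 256-byte chunk.
 */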
1140843836bfSYao Xingtao ways_to_eiw(iw, &eiw);
1141843836bfSYao Xingtao if (eiw == 0 || eiw == 8)
1142843836bfSYao Xingtao return 0;
1143843836bfSYao Xingtao
1144843836bfSYao Xingtao granularity_to_eig(ig, &eig);
1145843836bfSYao Xingtao if (eiw > 8)
1146843836bfSYao Xingtao high_pos = eiw + eig - 1;
1147843836bfSYao Xingtao else
1148843836bfSYao Xingtao high_pos = eiw + eig + 7;
1149843836bfSYao Xingtao low_pos = eig + 8;
1150843836bfSYao Xingtao interleave_mask = GENMASK(high_pos, low_pos);
1151843836bfSYao Xingtao if (interleave_mask & ~cxlhdm->interleave_mask)
1152843836bfSYao Xingtao return -ENXIO;
1153843836bfSYao Xingtao
1154843836bfSYao Xingtao return 0;
1155843836bfSYao Xingtao }
1156843836bfSYao Xingtao
115727b3f8d1SDan Williams static int cxl_port_setup_targets(struct cxl_port *port,
115827b3f8d1SDan Williams struct cxl_region *cxlr,
115927b3f8d1SDan Williams struct cxl_endpoint_decoder *cxled)
116027b3f8d1SDan Williams {
116127b3f8d1SDan Williams struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
116227b3f8d1SDan Williams int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
116327b3f8d1SDan Williams struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
116427b3f8d1SDan Williams struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
116527b3f8d1SDan Williams struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
116627b3f8d1SDan Williams struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
116727b3f8d1SDan Williams struct cxl_region_params *p = &cxlr->params;
116827b3f8d1SDan Williams struct cxl_decoder *cxld = cxl_rr->decoder;
116927b3f8d1SDan Williams struct cxl_switch_decoder *cxlsd;
1170*06518a75SHuaisheng Ye struct cxl_port *iter = port;
117127b3f8d1SDan Williams u16 eig, peig;
117227b3f8d1SDan Williams u8 eiw, peiw;
117327b3f8d1SDan Williams
117427b3f8d1SDan Williams /*
117527b3f8d1SDan Williams * While root level decoders support x3, x6, x12, switch level
117627b3f8d1SDan Williams * decoders only support powers of 2 up to x16.
117727b3f8d1SDan Williams */
117827b3f8d1SDan Williams if (!is_power_of_2(cxl_rr->nr_targets)) {
117927b3f8d1SDan Williams dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
11807481653dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev),
118127b3f8d1SDan Williams cxl_rr->nr_targets);
118227b3f8d1SDan Williams return -EINVAL;
118327b3f8d1SDan Williams }
118427b3f8d1SDan Williams
118527b3f8d1SDan Williams cxlsd = to_cxl_switch_decoder(&cxld->dev);
118627b3f8d1SDan Williams if (cxl_rr->nr_targets_set) {
1187*06518a75SHuaisheng Ye int i, distance = 1;
1188*06518a75SHuaisheng Ye struct cxl_region_ref *cxl_rr_iter;
118927b3f8d1SDan Williams
1190e4f6dfa9SDan Williams /*
1191*06518a75SHuaisheng Ye * The "distance" between peer downstream ports determines which
1192*06518a75SHuaisheng Ye * endpoint positions in the region interleave a given port can
1193*06518a75SHuaisheng Ye * host.
1194*06518a75SHuaisheng Ye *
1195*06518a75SHuaisheng Ye * For example, at the root of a hierarchy the distance is
1196*06518a75SHuaisheng Ye * always 1 as every index targets a different host-bridge. At
1197*06518a75SHuaisheng Ye * each subsequent switch level a port hosts every Nth region
1198*06518a75SHuaisheng Ye * position, where N is the distance computed below.
1199e4f6dfa9SDan Williams */
1200*06518a75SHuaisheng Ye do {
1201*06518a75SHuaisheng Ye cxl_rr_iter = cxl_rr_load(iter, cxlr);
1202*06518a75SHuaisheng Ye distance *= cxl_rr_iter->nr_targets;
1203*06518a75SHuaisheng Ye iter = to_cxl_port(iter->dev.parent);
1204*06518a75SHuaisheng Ye } while (!is_cxl_root(iter));
1205*06518a75SHuaisheng Ye distance *= cxlrd->cxlsd.cxld.interleave_ways;
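/*
 * Sketch of the product above under an assumed topology: a x2 root
 * interleave across two host bridges, each host bridge passing a single
 * dport (nr_targets == 1) to a x2 switch. For the host bridge the loop
 * contributes 1 and the root multiplier contributes 2, so distance == 2:
 * region positions 0 and 2 are the peers that share that host bridge dport.
 */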
1206*06518a75SHuaisheng Ye
120727b3f8d1SDan Williams for (i = 0; i < cxl_rr->nr_targets_set; i++)
120827b3f8d1SDan Williams if (ep->dport == cxlsd->target[i]) {
120927b3f8d1SDan Williams rc = check_last_peer(cxled, ep, cxl_rr,
121027b3f8d1SDan Williams distance);
121127b3f8d1SDan Williams if (rc)
121227b3f8d1SDan Williams return rc;
121327b3f8d1SDan Williams goto out_target_set;
121427b3f8d1SDan Williams }
121527b3f8d1SDan Williams goto add_target;
121627b3f8d1SDan Williams }
121727b3f8d1SDan Williams
121827b3f8d1SDan Williams if (is_cxl_root(parent_port)) {
12198a9ab903SJim Harris /*
12208a9ab903SJim Harris * Root decoder IG is always set to the value in the CFMWS, which
12218a9ab903SJim Harris * may differ from this region's IG. We can use the
12228a9ab903SJim Harris * region's IG here since interleave_granularity_store()
12238a9ab903SJim Harris * does not allow interleaved host-bridges with
12248a9ab903SJim Harris * root IG != region IG.
12258a9ab903SJim Harris */
12268a9ab903SJim Harris parent_ig = p->interleave_granularity;
122727b3f8d1SDan Williams parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
122827b3f8d1SDan Williams /*
122927b3f8d1SDan Williams * For purposes of address bit routing, use power-of-2 math for
123027b3f8d1SDan Williams * switch ports.
123127b3f8d1SDan Williams */
123227b3f8d1SDan Williams if (!is_power_of_2(parent_iw))
123327b3f8d1SDan Williams parent_iw /= 3;
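/*
 * For instance, an assumed x6 root interleave is treated as x2 for
 * address-bit routing here; the remaining factor of 3 is handled by the
 * root decoder's modulo-3 target selection.
 */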
123427b3f8d1SDan Williams } else {
123527b3f8d1SDan Williams struct cxl_region_ref *parent_rr;
123627b3f8d1SDan Williams struct cxl_decoder *parent_cxld;
123727b3f8d1SDan Williams
123827b3f8d1SDan Williams parent_rr = cxl_rr_load(parent_port, cxlr);
123927b3f8d1SDan Williams parent_cxld = parent_rr->decoder;
124027b3f8d1SDan Williams parent_ig = parent_cxld->interleave_granularity;
124127b3f8d1SDan Williams parent_iw = parent_cxld->interleave_ways;
124227b3f8d1SDan Williams }
124327b3f8d1SDan Williams
124483351ddbSDave Jiang rc = granularity_to_eig(parent_ig, &peig);
12458d428542SDan Williams if (rc) {
12468d428542SDan Williams dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
12477481653dSDan Williams dev_name(parent_port->uport_dev),
12488d428542SDan Williams dev_name(&parent_port->dev), parent_ig);
12498d428542SDan Williams return rc;
12508d428542SDan Williams }
12518d428542SDan Williams
1252c99b2e8cSDave Jiang rc = ways_to_eiw(parent_iw, &peiw);
12538d428542SDan Williams if (rc) {
12548d428542SDan Williams dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
12557481653dSDan Williams dev_name(parent_port->uport_dev),
12568d428542SDan Williams dev_name(&parent_port->dev), parent_iw);
12578d428542SDan Williams return rc;
12588d428542SDan Williams }
125927b3f8d1SDan Williams
126027b3f8d1SDan Williams iw = cxl_rr->nr_targets;
1261c99b2e8cSDave Jiang rc = ways_to_eiw(iw, &eiw);
12628d428542SDan Williams if (rc) {
12638d428542SDan Williams dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
12647481653dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev), iw);
12658d428542SDan Williams return rc;
12668d428542SDan Williams }
12678d428542SDan Williams
1268298d44d0SDan Williams /*
126918f35dc9SAlison Schofield * Interleave granularity is a multiple of the @parent_port granularity;
127018f35dc9SAlison Schofield * the multiplier is the parent port's interleave ways.
1271298d44d0SDan Williams */
127218f35dc9SAlison Schofield rc = granularity_to_eig(parent_ig * parent_iw, &eig);
127318f35dc9SAlison Schofield if (rc) {
127418f35dc9SAlison Schofield dev_dbg(&cxlr->dev,
127518f35dc9SAlison Schofield "%s: invalid granularity calculation (%d * %d)\n",
127618f35dc9SAlison Schofield dev_name(&parent_port->dev), parent_ig, parent_iw);
127718f35dc9SAlison Schofield return rc;
127827b3f8d1SDan Williams }
127927b3f8d1SDan Williams
128083351ddbSDave Jiang rc = eig_to_granularity(eig, &ig);
128127b3f8d1SDan Williams if (rc) {
128227b3f8d1SDan Williams dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
12837481653dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev),
128427b3f8d1SDan Williams 256 << eig);
128527b3f8d1SDan Williams return rc;
128627b3f8d1SDan Williams }
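/*
 * Worked example for the two conversions above (assumed values): with
 * parent_ig = 256 and parent_iw = 2 this port decodes 256 * 2 = 512 byte
 * chunks, so eig encodes 512 and eig_to_granularity() returns ig = 512.
 */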
128727b3f8d1SDan Williams
12886723e58dSDan Williams if (iw > 8 || iw > cxlsd->nr_targets) {
12896723e58dSDan Williams dev_dbg(&cxlr->dev,
12906723e58dSDan Williams "%s:%s:%s: ways: %d overflows targets: %d\n",
12916723e58dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev),
12926723e58dSDan Williams dev_name(&cxld->dev), iw, cxlsd->nr_targets);
12936723e58dSDan Williams return -ENXIO;
12946723e58dSDan Williams }
12956723e58dSDan Williams
1296a32320b7SDan Williams if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1297a32320b7SDan Williams if (cxld->interleave_ways != iw ||
1298a32320b7SDan Williams cxld->interleave_granularity != ig ||
1299a32320b7SDan Williams cxld->hpa_range.start != p->res->start ||
1300a32320b7SDan Williams cxld->hpa_range.end != p->res->end ||
1301a32320b7SDan Williams ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
1302a32320b7SDan Williams dev_err(&cxlr->dev,
1303a32320b7SDan Williams "%s:%s %s expected iw: %d ig: %d %pr\n",
13047481653dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev),
1305a32320b7SDan Williams __func__, iw, ig, p->res);
1306a32320b7SDan Williams dev_err(&cxlr->dev,
1307a32320b7SDan Williams "%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
13087481653dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev),
1309a32320b7SDan Williams __func__, cxld->interleave_ways,
1310a32320b7SDan Williams cxld->interleave_granularity,
1311a32320b7SDan Williams (cxld->flags & CXL_DECODER_F_ENABLE) ?
1312a32320b7SDan Williams "enabled" :
1313a32320b7SDan Williams "disabled",
1314a32320b7SDan Williams cxld->hpa_range.start, cxld->hpa_range.end);
1315a32320b7SDan Williams return -ENXIO;
1316a32320b7SDan Williams }
1317a32320b7SDan Williams } else {
1318843836bfSYao Xingtao rc = check_interleave_cap(cxld, iw, ig);
1319843836bfSYao Xingtao if (rc) {
1320843836bfSYao Xingtao dev_dbg(&cxlr->dev,
1321843836bfSYao Xingtao "%s:%s iw: %d ig: %d is not supported\n",
1322843836bfSYao Xingtao dev_name(port->uport_dev),
1323843836bfSYao Xingtao dev_name(&port->dev), iw, ig);
1324843836bfSYao Xingtao return rc;
1325843836bfSYao Xingtao }
1326843836bfSYao Xingtao
132727b3f8d1SDan Williams cxld->interleave_ways = iw;
132827b3f8d1SDan Williams cxld->interleave_granularity = ig;
1329910bc55dSDan Williams cxld->hpa_range = (struct range) {
1330910bc55dSDan Williams .start = p->res->start,
1331910bc55dSDan Williams .end = p->res->end,
1332910bc55dSDan Williams };
1333a32320b7SDan Williams }
13347481653dSDan Williams dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
133527b3f8d1SDan Williams dev_name(&port->dev), iw, ig);
133627b3f8d1SDan Williams add_target:
133727b3f8d1SDan Williams if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
133827b3f8d1SDan Williams dev_dbg(&cxlr->dev,
133927b3f8d1SDan Williams "%s:%s: targets full trying to add %s:%s at %d\n",
13407481653dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev),
134127b3f8d1SDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
134227b3f8d1SDan Williams return -ENXIO;
134327b3f8d1SDan Williams }
1344a32320b7SDan Williams if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1345a32320b7SDan Williams if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
1346a32320b7SDan Williams dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
13477481653dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev),
1348a32320b7SDan Williams dev_name(&cxlsd->cxld.dev),
1349227db574SRobert Richter dev_name(ep->dport->dport_dev),
1350a32320b7SDan Williams cxl_rr->nr_targets_set);
1351a32320b7SDan Williams return -ENXIO;
1352a32320b7SDan Williams }
1353a32320b7SDan Williams } else
135427b3f8d1SDan Williams cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
135527b3f8d1SDan Williams inc = 1;
135627b3f8d1SDan Williams out_target_set:
135727b3f8d1SDan Williams cxl_rr->nr_targets_set += inc;
135827b3f8d1SDan Williams dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
13597481653dSDan Williams dev_name(port->uport_dev), dev_name(&port->dev),
1360227db574SRobert Richter cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
136127b3f8d1SDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
136227b3f8d1SDan Williams
136327b3f8d1SDan Williams return 0;
136427b3f8d1SDan Williams }
136527b3f8d1SDan Williams
136627b3f8d1SDan Williams static void cxl_port_reset_targets(struct cxl_port *port,
136727b3f8d1SDan Williams struct cxl_region *cxlr)
136827b3f8d1SDan Williams {
136927b3f8d1SDan Williams struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
1370910bc55dSDan Williams struct cxl_decoder *cxld;
137127b3f8d1SDan Williams
137227b3f8d1SDan Williams /*
137327b3f8d1SDan Williams * After the last endpoint has been detached the entire cxl_rr may now
137427b3f8d1SDan Williams * be gone.
137527b3f8d1SDan Williams */
1376910bc55dSDan Williams if (!cxl_rr)
1377910bc55dSDan Williams return;
137827b3f8d1SDan Williams cxl_rr->nr_targets_set = 0;
1379910bc55dSDan Williams
1380910bc55dSDan Williams cxld = cxl_rr->decoder;
1381910bc55dSDan Williams cxld->hpa_range = (struct range) {
1382910bc55dSDan Williams .start = 0,
1383910bc55dSDan Williams .end = -1,
1384910bc55dSDan Williams };
138527b3f8d1SDan Williams }
138627b3f8d1SDan Williams
138727b3f8d1SDan Williams static void cxl_region_teardown_targets(struct cxl_region *cxlr)
138827b3f8d1SDan Williams {
138927b3f8d1SDan Williams struct cxl_region_params *p = &cxlr->params;
139027b3f8d1SDan Williams struct cxl_endpoint_decoder *cxled;
1391030f8803SDan Williams struct cxl_dev_state *cxlds;
139227b3f8d1SDan Williams struct cxl_memdev *cxlmd;
139327b3f8d1SDan Williams struct cxl_port *iter;
139427b3f8d1SDan Williams struct cxl_ep *ep;
139527b3f8d1SDan Williams int i;
139627b3f8d1SDan Williams
1397a32320b7SDan Williams /*
1398a32320b7SDan Williams * In the auto-discovery case skip automatic teardown since the
1399a32320b7SDan Williams * address space is already active
1400a32320b7SDan Williams */
1401a32320b7SDan Williams if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
1402a32320b7SDan Williams return;
1403a32320b7SDan Williams
140427b3f8d1SDan Williams for (i = 0; i < p->nr_targets; i++) {
140527b3f8d1SDan Williams cxled = p->targets[i];
140627b3f8d1SDan Williams cxlmd = cxled_to_memdev(cxled);
1407030f8803SDan Williams cxlds = cxlmd->cxlds;
1408030f8803SDan Williams
1409030f8803SDan Williams if (cxlds->rcd)
1410030f8803SDan Williams continue;
141127b3f8d1SDan Williams
141227b3f8d1SDan Williams iter = cxled_to_port(cxled);
141327b3f8d1SDan Williams while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
141427b3f8d1SDan Williams iter = to_cxl_port(iter->dev.parent);
141527b3f8d1SDan Williams
141627b3f8d1SDan Williams for (ep = cxl_ep_load(iter, cxlmd); iter;
141727b3f8d1SDan Williams iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
141827b3f8d1SDan Williams cxl_port_reset_targets(iter, cxlr);
141927b3f8d1SDan Williams }
142027b3f8d1SDan Williams }
142127b3f8d1SDan Williams
142227b3f8d1SDan Williams static int cxl_region_setup_targets(struct cxl_region *cxlr)
142327b3f8d1SDan Williams {
142427b3f8d1SDan Williams struct cxl_region_params *p = &cxlr->params;
142527b3f8d1SDan Williams struct cxl_endpoint_decoder *cxled;
1426030f8803SDan Williams struct cxl_dev_state *cxlds;
1427030f8803SDan Williams int i, rc, rch = 0, vh = 0;
142827b3f8d1SDan Williams struct cxl_memdev *cxlmd;
142927b3f8d1SDan Williams struct cxl_port *iter;
143027b3f8d1SDan Williams struct cxl_ep *ep;
143127b3f8d1SDan Williams
143227b3f8d1SDan Williams for (i = 0; i < p->nr_targets; i++) {
143327b3f8d1SDan Williams cxled = p->targets[i];
143427b3f8d1SDan Williams cxlmd = cxled_to_memdev(cxled);
1435030f8803SDan Williams cxlds = cxlmd->cxlds;
1436030f8803SDan Williams
1437030f8803SDan Williams /* validate that all targets agree on topology */
1438030f8803SDan Williams if (!cxlds->rcd) {
1439030f8803SDan Williams vh++;
1440030f8803SDan Williams } else {
1441030f8803SDan Williams rch++;
1442030f8803SDan Williams continue;
1443030f8803SDan Williams }
144427b3f8d1SDan Williams
144527b3f8d1SDan Williams iter = cxled_to_port(cxled);
144627b3f8d1SDan Williams while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
144727b3f8d1SDan Williams iter = to_cxl_port(iter->dev.parent);
144827b3f8d1SDan Williams
144927b3f8d1SDan Williams /*
1450a32320b7SDan Williams * Descend the topology tree programming / validating
1451a32320b7SDan Williams * targets while looking for conflicts.
145227b3f8d1SDan Williams */
145327b3f8d1SDan Williams for (ep = cxl_ep_load(iter, cxlmd); iter;
145427b3f8d1SDan Williams iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
145527b3f8d1SDan Williams rc = cxl_port_setup_targets(iter, cxlr, cxled);
145627b3f8d1SDan Williams if (rc) {
145727b3f8d1SDan Williams cxl_region_teardown_targets(cxlr);
145827b3f8d1SDan Williams return rc;
145927b3f8d1SDan Williams }
146027b3f8d1SDan Williams }
146127b3f8d1SDan Williams }
146227b3f8d1SDan Williams
1463030f8803SDan Williams if (rch && vh) {
1464030f8803SDan Williams dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
1465030f8803SDan Williams cxl_region_teardown_targets(cxlr);
1466030f8803SDan Williams return -ENXIO;
1467030f8803SDan Williams }
1468030f8803SDan Williams
146927b3f8d1SDan Williams return 0;
147027b3f8d1SDan Williams }
147127b3f8d1SDan Williams
14729995576cSDan Williams static int cxl_region_validate_position(struct cxl_region *cxlr,
14739995576cSDan Williams struct cxl_endpoint_decoder *cxled,
14749995576cSDan Williams int pos)
1475b9686e8cSDan Williams {
1476384e624bSDan Williams struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1477b9686e8cSDan Williams struct cxl_region_params *p = &cxlr->params;
14789995576cSDan Williams int i;
1479384e624bSDan Williams
1480384e624bSDan Williams if (pos < 0 || pos >= p->interleave_ways) {
1481b9686e8cSDan Williams dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
1482b9686e8cSDan Williams p->interleave_ways);
1483b9686e8cSDan Williams return -ENXIO;
1484b9686e8cSDan Williams }
1485b9686e8cSDan Williams
1486b9686e8cSDan Williams if (p->targets[pos] == cxled)
1487b9686e8cSDan Williams return 0;
1488b9686e8cSDan Williams
1489b9686e8cSDan Williams if (p->targets[pos]) {
1490b9686e8cSDan Williams struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
1491b9686e8cSDan Williams struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
1492b9686e8cSDan Williams
1493b9686e8cSDan Williams dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
1494b9686e8cSDan Williams pos, dev_name(&cxlmd_target->dev),
1495b9686e8cSDan Williams dev_name(&cxled_target->cxld.dev));
1496b9686e8cSDan Williams return -EBUSY;
1497b9686e8cSDan Williams }
1498b9686e8cSDan Williams
1499384e624bSDan Williams for (i = 0; i < p->interleave_ways; i++) {
1500384e624bSDan Williams struct cxl_endpoint_decoder *cxled_target;
1501384e624bSDan Williams struct cxl_memdev *cxlmd_target;
1502384e624bSDan Williams
1503f04facfbSFan Ni cxled_target = p->targets[i];
1504384e624bSDan Williams if (!cxled_target)
1505384e624bSDan Williams continue;
1506384e624bSDan Williams
1507384e624bSDan Williams cxlmd_target = cxled_to_memdev(cxled_target);
1508384e624bSDan Williams if (cxlmd_target == cxlmd) {
1509384e624bSDan Williams dev_dbg(&cxlr->dev,
1510384e624bSDan Williams "%s already specified at position %d via: %s\n",
1511384e624bSDan Williams dev_name(&cxlmd->dev), pos,
1512384e624bSDan Williams dev_name(&cxled_target->cxld.dev));
1513384e624bSDan Williams return -EBUSY;
1514384e624bSDan Williams }
1515384e624bSDan Williams }
1516384e624bSDan Williams
15179995576cSDan Williams return 0;
15189995576cSDan Williams }
15199995576cSDan Williams
15209995576cSDan Williams static int cxl_region_attach_position(struct cxl_region *cxlr,
15219995576cSDan Williams struct cxl_root_decoder *cxlrd,
15229995576cSDan Williams struct cxl_endpoint_decoder *cxled,
15239995576cSDan Williams const struct cxl_dport *dport, int pos)
15249995576cSDan Williams {
15259995576cSDan Williams struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1526124451bbSAlison Schofield struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
1527124451bbSAlison Schofield struct cxl_decoder *cxld = &cxlsd->cxld;
1528124451bbSAlison Schofield int iw = cxld->interleave_ways;
15299995576cSDan Williams struct cxl_port *iter;
15309995576cSDan Williams int rc;
15319995576cSDan Williams
1532124451bbSAlison Schofield if (dport != cxlrd->cxlsd.target[pos % iw]) {
15339995576cSDan Williams dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
15349995576cSDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
15359995576cSDan Williams dev_name(&cxlrd->cxlsd.cxld.dev));
15369995576cSDan Williams return -ENXIO;
15379995576cSDan Williams }
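/*
 * The modulo check above follows the root interleave round-robin: with a
 * hypothetical x2 root decoder, position 3 must sit behind
 * cxlrd->cxlsd.target[3 % 2], i.e. the second host bridge in the target
 * list.
 */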
15389995576cSDan Williams
15399995576cSDan Williams for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
15409995576cSDan Williams iter = to_cxl_port(iter->dev.parent)) {
15419995576cSDan Williams rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
15429995576cSDan Williams if (rc)
15439995576cSDan Williams goto err;
15449995576cSDan Williams }
15459995576cSDan Williams
15469995576cSDan Williams return 0;
15479995576cSDan Williams
15489995576cSDan Williams err:
15499995576cSDan Williams for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
15509995576cSDan Williams iter = to_cxl_port(iter->dev.parent))
15519995576cSDan Williams cxl_port_detach_region(iter, cxlr, cxled);
15529995576cSDan Williams return rc;
15539995576cSDan Williams }
15549995576cSDan Williams
1555a32320b7SDan Williams static int cxl_region_attach_auto(struct cxl_region *cxlr,
1556a32320b7SDan Williams struct cxl_endpoint_decoder *cxled, int pos)
1557a32320b7SDan Williams {
1558a32320b7SDan Williams struct cxl_region_params *p = &cxlr->params;
1559a32320b7SDan Williams
1560a32320b7SDan Williams if (cxled->state != CXL_DECODER_STATE_AUTO) {
1561a32320b7SDan Williams dev_err(&cxlr->dev,
1562a32320b7SDan Williams "%s: unable to add decoder to autodetected region\n",
1563a32320b7SDan Williams dev_name(&cxled->cxld.dev));
1564a32320b7SDan Williams return -EINVAL;
1565a32320b7SDan Williams }
1566a32320b7SDan Williams
1567a32320b7SDan Williams if (pos >= 0) {
1568a32320b7SDan Williams dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
1569a32320b7SDan Williams dev_name(&cxled->cxld.dev), pos);
1570a32320b7SDan Williams return -EINVAL;
1571a32320b7SDan Williams }
1572a32320b7SDan Williams
1573a32320b7SDan Williams if (p->nr_targets >= p->interleave_ways) {
1574a32320b7SDan Williams dev_err(&cxlr->dev, "%s: no more target slots available\n",
1575a32320b7SDan Williams dev_name(&cxled->cxld.dev));
1576a32320b7SDan Williams return -ENXIO;
1577a32320b7SDan Williams }
1578a32320b7SDan Williams
1579a32320b7SDan Williams /*
1580a32320b7SDan Williams * Temporarily record the endpoint decoder into the target array. Yes,
1581a32320b7SDan Williams * this means that userspace can view devices in the wrong position
1582a32320b7SDan Williams * before the region activates, and must be careful to understand when
1583a32320b7SDan Williams * it might be racing region autodiscovery.
1584a32320b7SDan Williams */
1585a32320b7SDan Williams pos = p->nr_targets;
1586a32320b7SDan Williams p->targets[pos] = cxled;
1587a32320b7SDan Williams cxled->pos = pos;
1588a32320b7SDan Williams p->nr_targets++;
1589a32320b7SDan Williams
1590a32320b7SDan Williams return 0;
1591a32320b7SDan Williams }
1592a32320b7SDan Williams
15933cfdfce0SAlison Schofield static int cmp_interleave_pos(const void *a, const void *b)
15943cfdfce0SAlison Schofield {
15953cfdfce0SAlison Schofield struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
15963cfdfce0SAlison Schofield struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
15973cfdfce0SAlison Schofield
15983cfdfce0SAlison Schofield return cxled_a->pos - cxled_b->pos;
15993cfdfce0SAlison Schofield }
16003cfdfce0SAlison Schofield
1601a32320b7SDan Williams static struct cxl_port *next_port(struct cxl_port *port)
1602a32320b7SDan Williams {
1603a32320b7SDan Williams if (!port->parent_dport)
1604a32320b7SDan Williams return NULL;
1605a32320b7SDan Williams return port->parent_dport->port;
1606a32320b7SDan Williams }
1607a32320b7SDan Williams
1608c4255b9bSAlison Schofield static int match_switch_decoder_by_range(struct device *dev, void *data)
1609a32320b7SDan Williams {
1610a32320b7SDan Williams struct cxl_switch_decoder *cxlsd;
1611c4255b9bSAlison Schofield struct range *r1, *r2 = data;
1612a32320b7SDan Williams
1613a32320b7SDan Williams if (!is_switch_decoder(dev))
1614a32320b7SDan Williams return 0;
1615a32320b7SDan Williams
1616a32320b7SDan Williams cxlsd = to_cxl_switch_decoder(dev);
1617c4255b9bSAlison Schofield r1 = &cxlsd->cxld.hpa_range;
1618c4255b9bSAlison Schofield
1619c4255b9bSAlison Schofield if (is_root_decoder(dev))
1620c4255b9bSAlison Schofield return range_contains(r1, r2);
1621c4255b9bSAlison Schofield return (r1->start == r2->start && r1->end == r2->end);
1622a32320b7SDan Williams }
1623a32320b7SDan Williams
1624c6ffabc6SAlison Schofield static int find_pos_and_ways(struct cxl_port *port, struct range *range,
1625c6ffabc6SAlison Schofield int *pos, int *ways)
1626c6ffabc6SAlison Schofield {
1627c6ffabc6SAlison Schofield struct cxl_switch_decoder *cxlsd;
1628c6ffabc6SAlison Schofield struct cxl_port *parent;
1629c6ffabc6SAlison Schofield struct device *dev;
1630c6ffabc6SAlison Schofield int rc = -ENXIO;
1631c6ffabc6SAlison Schofield
1632c6ffabc6SAlison Schofield parent = next_port(port);
1633c6ffabc6SAlison Schofield if (!parent)
1634c6ffabc6SAlison Schofield return rc;
1635c6ffabc6SAlison Schofield
1636c6ffabc6SAlison Schofield dev = device_find_child(&parent->dev, range,
1637c6ffabc6SAlison Schofield match_switch_decoder_by_range);
1638c6ffabc6SAlison Schofield if (!dev) {
1639c6ffabc6SAlison Schofield dev_err(port->uport_dev,
1640c6ffabc6SAlison Schofield "failed to find decoder mapping %#llx-%#llx\n",
1641c6ffabc6SAlison Schofield range->start, range->end);
1642c6ffabc6SAlison Schofield return rc;
1643c6ffabc6SAlison Schofield }
1644c6ffabc6SAlison Schofield cxlsd = to_cxl_switch_decoder(dev);
1645c6ffabc6SAlison Schofield *ways = cxlsd->cxld.interleave_ways;
1646c6ffabc6SAlison Schofield
1647c6ffabc6SAlison Schofield for (int i = 0; i < *ways; i++) {
1648c6ffabc6SAlison Schofield if (cxlsd->target[i] == port->parent_dport) {
1649c6ffabc6SAlison Schofield *pos = i;
1650c6ffabc6SAlison Schofield rc = 0;
1651c6ffabc6SAlison Schofield break;
1652c6ffabc6SAlison Schofield }
1653c6ffabc6SAlison Schofield }
1654c6ffabc6SAlison Schofield put_device(dev);
1655c6ffabc6SAlison Schofield
1656c6ffabc6SAlison Schofield return rc;
1657c6ffabc6SAlison Schofield }
1658c6ffabc6SAlison Schofield
1659c6ffabc6SAlison Schofield /**
1660c6ffabc6SAlison Schofield * cxl_calc_interleave_pos() - calculate an endpoint position in a region
1661c6ffabc6SAlison Schofield * @cxled: endpoint decoder member of given region
1662c6ffabc6SAlison Schofield *
1663c6ffabc6SAlison Schofield * The endpoint position is calculated by traversing the topology from
1664c6ffabc6SAlison Schofield * the endpoint to the root decoder and iteratively applying this
1665c6ffabc6SAlison Schofield * calculation:
1666c6ffabc6SAlison Schofield *
1667c6ffabc6SAlison Schofield * position = position * parent_ways + parent_pos;
1668c6ffabc6SAlison Schofield *
1669c6ffabc6SAlison Schofield * ...where @position is inferred from switch and root decoder target lists.
1670c6ffabc6SAlison Schofield *
1671c6ffabc6SAlison Schofield * Return: position >= 0 on success
1672c6ffabc6SAlison Schofield * -ENXIO on failure
1673c6ffabc6SAlison Schofield */
1674c6ffabc6SAlison Schofield static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
1675c6ffabc6SAlison Schofield {
1676c6ffabc6SAlison Schofield struct cxl_port *iter, *port = cxled_to_port(cxled);
1677c6ffabc6SAlison Schofield struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1678c6ffabc6SAlison Schofield struct range *range = &cxled->cxld.hpa_range;
1679c6ffabc6SAlison Schofield int parent_ways = 0, parent_pos = 0, pos = 0;
1680c6ffabc6SAlison Schofield int rc;
1681c6ffabc6SAlison Schofield
1682c6ffabc6SAlison Schofield /*
1683c6ffabc6SAlison Schofield * Example: the expected interleave order of the 4-way region shown
1684c6ffabc6SAlison Schofield * below is: mem0, mem2, mem1, mem3
1685c6ffabc6SAlison Schofield *
1686c6ffabc6SAlison Schofield * root_port
1687c6ffabc6SAlison Schofield * / \
1688c6ffabc6SAlison Schofield * host_bridge_0 host_bridge_1
1689c6ffabc6SAlison Schofield * | | | |
1690c6ffabc6SAlison Schofield * mem0 mem1 mem2 mem3
1691c6ffabc6SAlison Schofield *
1692c6ffabc6SAlison Schofield * In the example the calculator will iterate twice. The first iteration
1693c6ffabc6SAlison Schofield * uses the mem position in the host-bridge and the ways of the host-
1694c6ffabc6SAlison Schofield * bridge to generate the first, or local, position. The second
1695c6ffabc6SAlison Schofield * iteration uses the host-bridge position in the root_port and the ways
1696c6ffabc6SAlison Schofield * of the root_port to refine the position.
1697c6ffabc6SAlison Schofield *
1698c6ffabc6SAlison Schofield * A trace of the calculation per endpoint looks like this:
1699c6ffabc6SAlison Schofield * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0
1700c6ffabc6SAlison Schofield * pos = 0 * 2 + 0 pos = 0 * 2 + 1
1701c6ffabc6SAlison Schofield * pos: 0 pos: 1
1702c6ffabc6SAlison Schofield *
1703c6ffabc6SAlison Schofield * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1
1704c6ffabc6SAlison Schofield * pos = 1 * 2 + 0 pos = 1 * 2 + 1
1705c6ffabc6SAlison Schofield * pos: 2 pos = 3
1706c6ffabc6SAlison Schofield *
1707c6ffabc6SAlison Schofield * Note that while this example is simple, the method applies to more
1708c6ffabc6SAlison Schofield * complex topologies, including those with switches.
1709c6ffabc6SAlison Schofield */
1710c6ffabc6SAlison Schofield
1711c6ffabc6SAlison Schofield /* Iterate from endpoint to root_port refining the position */
1712c6ffabc6SAlison Schofield for (iter = port; iter; iter = next_port(iter)) {
1713c6ffabc6SAlison Schofield if (is_cxl_root(iter))
1714c6ffabc6SAlison Schofield break;
1715c6ffabc6SAlison Schofield
1716c6ffabc6SAlison Schofield rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
1717c6ffabc6SAlison Schofield if (rc)
1718c6ffabc6SAlison Schofield return rc;
1719c6ffabc6SAlison Schofield
1720c6ffabc6SAlison Schofield pos = pos * parent_ways + parent_pos;
1721c6ffabc6SAlison Schofield }
1722c6ffabc6SAlison Schofield
1723c6ffabc6SAlison Schofield dev_dbg(&cxlmd->dev,
1724c6ffabc6SAlison Schofield "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
1725c6ffabc6SAlison Schofield dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
1726c6ffabc6SAlison Schofield dev_name(&port->dev), range->start, range->end, pos);
1727c6ffabc6SAlison Schofield
1728c6ffabc6SAlison Schofield return pos;
1729c6ffabc6SAlison Schofield }
1730c6ffabc6SAlison Schofield
1731a32320b7SDan Williams static int cxl_region_sort_targets(struct cxl_region *cxlr)
1732a32320b7SDan Williams {
1733a32320b7SDan Williams struct cxl_region_params *p = &cxlr->params;
1734a32320b7SDan Williams int i, rc = 0;
1735a32320b7SDan Williams
1736a32320b7SDan Williams for (i = 0; i < p->nr_targets; i++) {
1737a32320b7SDan Williams struct cxl_endpoint_decoder *cxled = p->targets[i];
1738a32320b7SDan Williams
17393cfdfce0SAlison Schofield cxled->pos = cxl_calc_interleave_pos(cxled);
1740a32320b7SDan Williams /*
17413cfdfce0SAlison Schofield * Record that sorting failed, but still continue to calculate
17423cfdfce0SAlison Schofield * cxled->pos so that follow-on code paths can reliably
17433cfdfce0SAlison Schofield * do p->targets[cxled->pos] to self-reference their entry.
1744a32320b7SDan Williams */
1745a32320b7SDan Williams if (cxled->pos < 0)
1746a32320b7SDan Williams rc = -ENXIO;
1747a32320b7SDan Williams }
17483cfdfce0SAlison Schofield /* Keep the cxlr target list in interleave position order */
17493cfdfce0SAlison Schofield sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
17503cfdfce0SAlison Schofield cmp_interleave_pos, NULL);
1751a32320b7SDan Williams
1752a32320b7SDan Williams dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
1753a32320b7SDan Williams return rc;
1754a32320b7SDan Williams }
1755a32320b7SDan Williams
17569995576cSDan Williams static int cxl_region_attach(struct cxl_region *cxlr,
17579995576cSDan Williams struct cxl_endpoint_decoder *cxled, int pos)
17589995576cSDan Williams {
17599995576cSDan Williams struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
17609995576cSDan Williams struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
17619995576cSDan Williams struct cxl_region_params *p = &cxlr->params;
17629995576cSDan Williams struct cxl_port *ep_port, *root_port;
17639995576cSDan Williams struct cxl_dport *dport;
17649995576cSDan Williams int rc = -ENXIO;
17659995576cSDan Williams
1766843836bfSYao Xingtao rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
1767843836bfSYao Xingtao p->interleave_granularity);
1768843836bfSYao Xingtao if (rc) {
1769843836bfSYao Xingtao dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
1770843836bfSYao Xingtao dev_name(&cxled->cxld.dev), p->interleave_ways,
1771843836bfSYao Xingtao p->interleave_granularity);
1772843836bfSYao Xingtao return rc;
1773843836bfSYao Xingtao }
1774843836bfSYao Xingtao
17759995576cSDan Williams if (cxled->mode != cxlr->mode) {
17769995576cSDan Williams dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
17779995576cSDan Williams dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
17789995576cSDan Williams return -EINVAL;
17799995576cSDan Williams }
17809995576cSDan Williams
17819995576cSDan Williams if (cxled->mode == CXL_DECODER_DEAD) {
17829995576cSDan Williams dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
17839995576cSDan Williams return -ENODEV;
17849995576cSDan Williams }
17859995576cSDan Williams
17869995576cSDan Williams /* already full of members, or interleave config not yet established? */
17879995576cSDan Williams if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
17889995576cSDan Williams dev_dbg(&cxlr->dev, "region already active\n");
17899995576cSDan Williams return -EBUSY;
17909995576cSDan Williams } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
17919995576cSDan Williams dev_dbg(&cxlr->dev, "interleave config missing\n");
17929995576cSDan Williams return -ENXIO;
17939995576cSDan Williams }
17949995576cSDan Williams
179507ffcd8eSJim Harris if (p->nr_targets >= p->interleave_ways) {
179607ffcd8eSJim Harris dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
179707ffcd8eSJim Harris p->nr_targets);
179807ffcd8eSJim Harris return -EINVAL;
179907ffcd8eSJim Harris }
180007ffcd8eSJim Harris
1801384e624bSDan Williams ep_port = cxled_to_port(cxled);
1802384e624bSDan Williams root_port = cxlrd_to_port(cxlrd);
1803384e624bSDan Williams dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
1804384e624bSDan Williams if (!dport) {
1805384e624bSDan Williams dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
1806384e624bSDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1807384e624bSDan Williams dev_name(cxlr->dev.parent));
1808384e624bSDan Williams return -ENXIO;
1809384e624bSDan Williams }
1810384e624bSDan Williams
1811384e624bSDan Williams if (cxled->cxld.target_type != cxlr->type) {
1812384e624bSDan Williams dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
1813384e624bSDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1814384e624bSDan Williams cxled->cxld.target_type, cxlr->type);
1815384e624bSDan Williams return -ENXIO;
1816384e624bSDan Williams }
1817384e624bSDan Williams
1818384e624bSDan Williams if (!cxled->dpa_res) {
1819384e624bSDan Williams dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
1820384e624bSDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
1821384e624bSDan Williams return -ENXIO;
1822384e624bSDan Williams }
1823384e624bSDan Williams
1824384e624bSDan Williams if (resource_size(cxled->dpa_res) * p->interleave_ways !=
1825384e624bSDan Williams resource_size(p->res)) {
1826384e624bSDan Williams dev_dbg(&cxlr->dev,
1827384e624bSDan Williams "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
1828384e624bSDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1829384e624bSDan Williams (u64)resource_size(cxled->dpa_res), p->interleave_ways,
1830384e624bSDan Williams (u64)resource_size(p->res));
1831384e624bSDan Williams return -EINVAL;
1832384e624bSDan Williams }
1833384e624bSDan Williams
1834a32320b7SDan Williams if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1835a32320b7SDan Williams int i;
1836a32320b7SDan Williams
1837a32320b7SDan Williams rc = cxl_region_attach_auto(cxlr, cxled, pos);
1838384e624bSDan Williams if (rc)
1839a32320b7SDan Williams return rc;
1840a32320b7SDan Williams
1841a32320b7SDan Williams /* await more targets to arrive... */
1842a32320b7SDan Williams if (p->nr_targets < p->interleave_ways)
1843a32320b7SDan Williams return 0;
1844a32320b7SDan Williams
1845a32320b7SDan Williams /*
1846a32320b7SDan Williams * All targets are here, which implies all PCI enumeration that
1847a32320b7SDan Williams * affects this region has been completed. Walk the topology to
1848a32320b7SDan Williams * sort the devices into their relative region decode position.
1849a32320b7SDan Williams */
1850a32320b7SDan Williams rc = cxl_region_sort_targets(cxlr);
1851a32320b7SDan Williams if (rc)
1852a32320b7SDan Williams return rc;
1853a32320b7SDan Williams
1854a32320b7SDan Williams for (i = 0; i < p->nr_targets; i++) {
1855a32320b7SDan Williams cxled = p->targets[i];
1856a32320b7SDan Williams ep_port = cxled_to_port(cxled);
1857a32320b7SDan Williams dport = cxl_find_dport_by_dev(root_port,
1858a32320b7SDan Williams ep_port->host_bridge);
1859a32320b7SDan Williams rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
1860a32320b7SDan Williams dport, i);
1861a32320b7SDan Williams if (rc)
1862a32320b7SDan Williams return rc;
1863384e624bSDan Williams }
1864384e624bSDan Williams
1865a32320b7SDan Williams rc = cxl_region_setup_targets(cxlr);
1866a32320b7SDan Williams if (rc)
1867a32320b7SDan Williams return rc;
1868a32320b7SDan Williams
1869a32320b7SDan Williams /*
1870a32320b7SDan Williams * If target setup succeeds in the autodiscovery case
1871a32320b7SDan Williams * then the region is already committed.
1872a32320b7SDan Williams */
1873a32320b7SDan Williams p->state = CXL_CONFIG_COMMIT;
1874a32320b7SDan Williams
1875a32320b7SDan Williams return 0;
1876a32320b7SDan Williams }
1877a32320b7SDan Williams
18789995576cSDan Williams rc = cxl_region_validate_position(cxlr, cxled, pos);
1879b9686e8cSDan Williams if (rc)
18809995576cSDan Williams return rc;
18819995576cSDan Williams
18829995576cSDan Williams rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
18839995576cSDan Williams if (rc)
18849995576cSDan Williams return rc;
1885b9686e8cSDan Williams
1886b9686e8cSDan Williams p->targets[pos] = cxled;
1887b9686e8cSDan Williams cxled->pos = pos;
1888b9686e8cSDan Williams p->nr_targets++;
1889b9686e8cSDan Williams
189027b3f8d1SDan Williams if (p->nr_targets == p->interleave_ways) {
189127b3f8d1SDan Williams rc = cxl_region_setup_targets(cxlr);
189227b3f8d1SDan Williams if (rc)
189307ffcd8eSJim Harris return rc;
1894384e624bSDan Williams p->state = CXL_CONFIG_ACTIVE;
189527b3f8d1SDan Williams }
1896384e624bSDan Williams
18972901c8bdSDan Williams cxled->cxld.interleave_ways = p->interleave_ways;
18982901c8bdSDan Williams cxled->cxld.interleave_granularity = p->interleave_granularity;
1899910bc55dSDan Williams cxled->cxld.hpa_range = (struct range) {
1900910bc55dSDan Williams .start = p->res->start,
1901910bc55dSDan Williams .end = p->res->end,
1902910bc55dSDan Williams };
19032901c8bdSDan Williams
1904c6ffabc6SAlison Schofield if (p->nr_targets != p->interleave_ways)
1905c6ffabc6SAlison Schofield return 0;
1906c6ffabc6SAlison Schofield
1907c6ffabc6SAlison Schofield /*
1908c6ffabc6SAlison Schofield * Test the auto-discovery position calculator function
1909c6ffabc6SAlison Schofield * against this successfully created user-defined region.
1910c6ffabc6SAlison Schofield * A fail message here means that this interleave config
1911c6ffabc6SAlison Schofield * will fail when presented as CXL_REGION_F_AUTO.
1912c6ffabc6SAlison Schofield */
1913c6ffabc6SAlison Schofield for (int i = 0; i < p->nr_targets; i++) {
1914c6ffabc6SAlison Schofield struct cxl_endpoint_decoder *cxled = p->targets[i];
1915c6ffabc6SAlison Schofield int test_pos;
1916c6ffabc6SAlison Schofield
1917c6ffabc6SAlison Schofield test_pos = cxl_calc_interleave_pos(cxled);
1918c6ffabc6SAlison Schofield dev_dbg(&cxled->cxld.dev,
1919c6ffabc6SAlison Schofield "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
1920c6ffabc6SAlison Schofield (test_pos == cxled->pos) ? "success" : "fail",
1921c6ffabc6SAlison Schofield test_pos, cxled->pos);
1922c6ffabc6SAlison Schofield }
1923c6ffabc6SAlison Schofield
1924b9686e8cSDan Williams return 0;
1925b9686e8cSDan Williams }
1926b9686e8cSDan Williams
1927176baefbSDan Williams static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
1928b9686e8cSDan Williams {
1929384e624bSDan Williams struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
1930b9686e8cSDan Williams struct cxl_region *cxlr = cxled->cxld.region;
1931b9686e8cSDan Williams struct cxl_region_params *p;
1932176baefbSDan Williams int rc = 0;
1933b9686e8cSDan Williams
1934b9686e8cSDan Williams lockdep_assert_held_write(&cxl_region_rwsem);
1935b9686e8cSDan Williams
1936b9686e8cSDan Williams if (!cxlr)
1937176baefbSDan Williams return 0;
1938b9686e8cSDan Williams
1939b9686e8cSDan Williams p = &cxlr->params;
1940b9686e8cSDan Williams get_device(&cxlr->dev);
1941b9686e8cSDan Williams
1942176baefbSDan Williams if (p->state > CXL_CONFIG_ACTIVE) {
19438e1b52c1SDan Williams cxl_region_decode_reset(cxlr, p->interleave_ways);
1944176baefbSDan Williams p->state = CXL_CONFIG_ACTIVE;
1945176baefbSDan Williams }
1946176baefbSDan Williams
1947384e624bSDan Williams for (iter = ep_port; !is_cxl_root(iter);
1948384e624bSDan Williams iter = to_cxl_port(iter->dev.parent))
1949384e624bSDan Williams cxl_port_detach_region(iter, cxlr, cxled);
1950384e624bSDan Williams
1951b9686e8cSDan Williams if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
1952b9686e8cSDan Williams p->targets[cxled->pos] != cxled) {
1953b9686e8cSDan Williams struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1954b9686e8cSDan Williams
1955b9686e8cSDan Williams dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
1956b9686e8cSDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1957b9686e8cSDan Williams cxled->pos);
1958b9686e8cSDan Williams goto out;
1959b9686e8cSDan Williams }
1960b9686e8cSDan Williams
196127b3f8d1SDan Williams if (p->state == CXL_CONFIG_ACTIVE) {
1962384e624bSDan Williams p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
196327b3f8d1SDan Williams cxl_region_teardown_targets(cxlr);
196427b3f8d1SDan Williams }
1965b9686e8cSDan Williams p->targets[cxled->pos] = NULL;
1966b9686e8cSDan Williams p->nr_targets--;
1967910bc55dSDan Williams cxled->cxld.hpa_range = (struct range) {
1968910bc55dSDan Williams .start = 0,
1969910bc55dSDan Williams .end = -1,
1970910bc55dSDan Williams };
1971b9686e8cSDan Williams
1972384e624bSDan Williams /* notify the region driver that one of its targets has departed */
1973b9686e8cSDan Williams up_write(&cxl_region_rwsem);
1974b9686e8cSDan Williams device_release_driver(&cxlr->dev);
1975b9686e8cSDan Williams down_write(&cxl_region_rwsem);
1976b9686e8cSDan Williams out:
1977b9686e8cSDan Williams put_device(&cxlr->dev);
1978176baefbSDan Williams return rc;
1979b9686e8cSDan Williams }
1980b9686e8cSDan Williams
1981b9686e8cSDan Williams void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
1982b9686e8cSDan Williams {
1983b9686e8cSDan Williams down_write(&cxl_region_rwsem);
1984b9686e8cSDan Williams cxled->mode = CXL_DECODER_DEAD;
1985b9686e8cSDan Williams cxl_region_detach(cxled);
1986b9686e8cSDan Williams up_write(&cxl_region_rwsem);
1987b9686e8cSDan Williams }
1988b9686e8cSDan Williams
19893528b1e1SDan Williams static int attach_target(struct cxl_region *cxlr,
19903528b1e1SDan Williams struct cxl_endpoint_decoder *cxled, int pos,
19913528b1e1SDan Williams unsigned int state)
1992b9686e8cSDan Williams {
19933528b1e1SDan Williams int rc = 0;
1994b9686e8cSDan Williams
19953528b1e1SDan Williams if (state == TASK_INTERRUPTIBLE)
1996b9686e8cSDan Williams rc = down_write_killable(&cxl_region_rwsem);
19973528b1e1SDan Williams else
19983528b1e1SDan Williams down_write(&cxl_region_rwsem);
1999b9686e8cSDan Williams if (rc)
20003528b1e1SDan Williams return rc;
20013528b1e1SDan Williams
2002b9686e8cSDan Williams down_read(&cxl_dpa_rwsem);
20033528b1e1SDan Williams rc = cxl_region_attach(cxlr, cxled, pos);
2004b9686e8cSDan Williams up_read(&cxl_dpa_rwsem);
2005b9686e8cSDan Williams up_write(&cxl_region_rwsem);
2006b9686e8cSDan Williams return rc;
2007b9686e8cSDan Williams }
2008b9686e8cSDan Williams
2009b9686e8cSDan Williams static int detach_target(struct cxl_region *cxlr, int pos)
2010b9686e8cSDan Williams {
2011b9686e8cSDan Williams struct cxl_region_params *p = &cxlr->params;
2012b9686e8cSDan Williams int rc;
2013b9686e8cSDan Williams
2014b9686e8cSDan Williams rc = down_write_killable(&cxl_region_rwsem);
2015b9686e8cSDan Williams if (rc)
2016b9686e8cSDan Williams return rc;
2017b9686e8cSDan Williams
2018b9686e8cSDan Williams if (pos >= p->interleave_ways) {
2019b9686e8cSDan Williams dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
2020b9686e8cSDan Williams p->interleave_ways);
2021b9686e8cSDan Williams rc = -ENXIO;
2022b9686e8cSDan Williams goto out;
2023b9686e8cSDan Williams }
2024b9686e8cSDan Williams
2025b9686e8cSDan Williams if (!p->targets[pos]) {
2026b9686e8cSDan Williams rc = 0;
2027b9686e8cSDan Williams goto out;
2028b9686e8cSDan Williams }
2029b9686e8cSDan Williams
2030176baefbSDan Williams rc = cxl_region_detach(p->targets[pos]);
2031b9686e8cSDan Williams out:
2032b9686e8cSDan Williams up_write(&cxl_region_rwsem);
2033b9686e8cSDan Williams return rc;
2034b9686e8cSDan Williams }
2035b9686e8cSDan Williams
2036b9686e8cSDan Williams static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
2037b9686e8cSDan Williams size_t len)
2038b9686e8cSDan Williams {
2039b9686e8cSDan Williams int rc;
2040b9686e8cSDan Williams
2041b9686e8cSDan Williams if (sysfs_streq(buf, "\n"))
2042b9686e8cSDan Williams rc = detach_target(cxlr, pos);
20433528b1e1SDan Williams else {
20443528b1e1SDan Williams struct device *dev;
20453528b1e1SDan Williams
20463528b1e1SDan Williams dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
20473528b1e1SDan Williams if (!dev)
20483528b1e1SDan Williams return -ENODEV;
20493528b1e1SDan Williams
20503528b1e1SDan Williams if (!is_endpoint_decoder(dev)) {
20513528b1e1SDan Williams rc = -EINVAL;
20523528b1e1SDan Williams goto out;
20533528b1e1SDan Williams }
20543528b1e1SDan Williams
20553528b1e1SDan Williams rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
20563528b1e1SDan Williams TASK_INTERRUPTIBLE);
20573528b1e1SDan Williams out:
20583528b1e1SDan Williams put_device(dev);
20593528b1e1SDan Williams }
2060b9686e8cSDan Williams
2061b9686e8cSDan Williams if (rc < 0)
2062b9686e8cSDan Williams return rc;
2063b9686e8cSDan Williams return len;
2064b9686e8cSDan Williams }
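/*
 * Illustrative sysfs usage of the targetN attributes defined below (device
 * names are hypothetical and depend on the platform topology):
 *
 *   echo decoder5.0 > /sys/bus/cxl/devices/region0/target0    # attach
 *   echo "" > /sys/bus/cxl/devices/region0/target0            # detach
 */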
2065b9686e8cSDan Williams
2066b9686e8cSDan Williams #define TARGET_ATTR_RW(n) \
2067b9686e8cSDan Williams static ssize_t target##n##_show( \
2068b9686e8cSDan Williams struct device *dev, struct device_attribute *attr, char *buf) \
2069b9686e8cSDan Williams { \
2070b9686e8cSDan Williams return show_targetN(to_cxl_region(dev), buf, (n)); \
2071b9686e8cSDan Williams } \
2072b9686e8cSDan Williams static ssize_t target##n##_store(struct device *dev, \
2073b9686e8cSDan Williams struct device_attribute *attr, \
2074b9686e8cSDan Williams const char *buf, size_t len) \
2075b9686e8cSDan Williams { \
2076b9686e8cSDan Williams return store_targetN(to_cxl_region(dev), buf, (n), len); \
2077b9686e8cSDan Williams } \
2078b9686e8cSDan Williams static DEVICE_ATTR_RW(target##n)
2079b9686e8cSDan Williams
2080b9686e8cSDan Williams TARGET_ATTR_RW(0);
2081b9686e8cSDan Williams TARGET_ATTR_RW(1);
2082b9686e8cSDan Williams TARGET_ATTR_RW(2);
2083b9686e8cSDan Williams TARGET_ATTR_RW(3);
2084b9686e8cSDan Williams TARGET_ATTR_RW(4);
2085b9686e8cSDan Williams TARGET_ATTR_RW(5);
2086b9686e8cSDan Williams TARGET_ATTR_RW(6);
2087b9686e8cSDan Williams TARGET_ATTR_RW(7);
2088b9686e8cSDan Williams TARGET_ATTR_RW(8);
2089b9686e8cSDan Williams TARGET_ATTR_RW(9);
2090b9686e8cSDan Williams TARGET_ATTR_RW(10);
2091b9686e8cSDan Williams TARGET_ATTR_RW(11);
2092b9686e8cSDan Williams TARGET_ATTR_RW(12);
2093b9686e8cSDan Williams TARGET_ATTR_RW(13);
2094b9686e8cSDan Williams TARGET_ATTR_RW(14);
2095b9686e8cSDan Williams TARGET_ATTR_RW(15);
2096b9686e8cSDan Williams
2097b9686e8cSDan Williams static struct attribute *target_attrs[] = {
2098b9686e8cSDan Williams &dev_attr_target0.attr,
2099b9686e8cSDan Williams &dev_attr_target1.attr,
2100b9686e8cSDan Williams &dev_attr_target2.attr,
2101b9686e8cSDan Williams &dev_attr_target3.attr,
2102b9686e8cSDan Williams &dev_attr_target4.attr,
2103b9686e8cSDan Williams &dev_attr_target5.attr,
2104b9686e8cSDan Williams &dev_attr_target6.attr,
2105b9686e8cSDan Williams &dev_attr_target7.attr,
2106b9686e8cSDan Williams &dev_attr_target8.attr,
2107b9686e8cSDan Williams &dev_attr_target9.attr,
2108b9686e8cSDan Williams &dev_attr_target10.attr,
2109b9686e8cSDan Williams &dev_attr_target11.attr,
2110b9686e8cSDan Williams &dev_attr_target12.attr,
2111b9686e8cSDan Williams &dev_attr_target13.attr,
2112b9686e8cSDan Williams &dev_attr_target14.attr,
2113b9686e8cSDan Williams &dev_attr_target15.attr,
2114b9686e8cSDan Williams NULL,
2115b9686e8cSDan Williams };
2116b9686e8cSDan Williams
2117b9686e8cSDan Williams static umode_t cxl_region_target_visible(struct kobject *kobj,
2118b9686e8cSDan Williams struct attribute *a, int n)
2119b9686e8cSDan Williams {
2120b9686e8cSDan Williams struct device *dev = kobj_to_dev(kobj);
2121b9686e8cSDan Williams struct cxl_region *cxlr = to_cxl_region(dev);
2122b9686e8cSDan Williams struct cxl_region_params *p = &cxlr->params;
2123b9686e8cSDan Williams
2124b9686e8cSDan Williams if (n < p->interleave_ways)
2125b9686e8cSDan Williams return a->mode;
2126b9686e8cSDan Williams return 0;
2127b9686e8cSDan Williams }
2128b9686e8cSDan Williams
2129b9686e8cSDan Williams static const struct attribute_group cxl_region_target_group = {
2130b9686e8cSDan Williams .attrs = target_attrs,
2131b9686e8cSDan Williams .is_visible = cxl_region_target_visible,
2132b9686e8cSDan Williams };
2133b9686e8cSDan Williams
2134b9686e8cSDan Williams static const struct attribute_group *get_cxl_region_target_group(void)
2135b9686e8cSDan Williams {
2136b9686e8cSDan Williams return &cxl_region_target_group;
2137b9686e8cSDan Williams }
2138b9686e8cSDan Williams
2139dd5ba0ebSBen Widawsky static const struct attribute_group *region_groups[] = {
2140dd5ba0ebSBen Widawsky &cxl_base_attribute_group,
2141dd5ba0ebSBen Widawsky &cxl_region_group,
2142b9686e8cSDan Williams &cxl_region_target_group,
2143dd5ba0ebSBen Widawsky NULL,
2144dd5ba0ebSBen Widawsky };
2145dd5ba0ebSBen Widawsky
2146779dd20cSBen Widawsky static void cxl_region_release(struct device *dev)
2147779dd20cSBen Widawsky {
21488f401ec1SDan Williams struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
2149779dd20cSBen Widawsky struct cxl_region *cxlr = to_cxl_region(dev);
21508f401ec1SDan Williams int id = atomic_read(&cxlrd->region_id);
21518f401ec1SDan Williams
21528f401ec1SDan Williams /*
21538f401ec1SDan Williams * Try to reuse the recently idled id rather than the cached
21548f401ec1SDan Williams * next id to prevent the region id space from increasing
21558f401ec1SDan Williams * unnecessarily.
21568f401ec1SDan Williams */
21578f401ec1SDan Williams if (cxlr->id < id)
21588f401ec1SDan Williams if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
21598f401ec1SDan Williams memregion_free(id);
21608f401ec1SDan Williams goto out;
21618f401ec1SDan Williams }
2162779dd20cSBen Widawsky
2163779dd20cSBen Widawsky memregion_free(cxlr->id);
21648f401ec1SDan Williams out:
21658f401ec1SDan Williams put_device(dev->parent);
2166779dd20cSBen Widawsky kfree(cxlr);
2167779dd20cSBen Widawsky }
2168779dd20cSBen Widawsky
21698d48817dSDan Williams const struct device_type cxl_region_type = {
2170779dd20cSBen Widawsky .name = "cxl_region",
2171779dd20cSBen Widawsky .release = cxl_region_release,
2172dd5ba0ebSBen Widawsky .groups = region_groups
2173779dd20cSBen Widawsky };
2174779dd20cSBen Widawsky
2175779dd20cSBen Widawsky bool is_cxl_region(struct device *dev)
2176779dd20cSBen Widawsky {
2177779dd20cSBen Widawsky return dev->type == &cxl_region_type;
2178779dd20cSBen Widawsky }
2179779dd20cSBen Widawsky EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
2180779dd20cSBen Widawsky
2181779dd20cSBen Widawsky static struct cxl_region *to_cxl_region(struct device *dev)
2182779dd20cSBen Widawsky {
2183779dd20cSBen Widawsky if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
2184779dd20cSBen Widawsky "not a cxl_region device\n"))
2185779dd20cSBen Widawsky return NULL;
2186779dd20cSBen Widawsky
2187779dd20cSBen Widawsky return container_of(dev, struct cxl_region, dev);
2188779dd20cSBen Widawsky }
2189779dd20cSBen Widawsky
2190779dd20cSBen Widawsky static void unregister_region(void *dev)
2191779dd20cSBen Widawsky {
219223a22cd1SDan Williams struct cxl_region *cxlr = to_cxl_region(dev);
21930d9e7340SDan Williams struct cxl_region_params *p = &cxlr->params;
21940d9e7340SDan Williams int i;
219523a22cd1SDan Williams
219623a22cd1SDan Williams device_del(dev);
21970d9e7340SDan Williams
21980d9e7340SDan Williams /*
21990d9e7340SDan Williams * Now that region sysfs is shut down, the parameter block is
22000d9e7340SDan Williams * read-only, so there is no need to hold the region rwsem to access the
22010d9e7340SDan Williams * region parameters.
22020d9e7340SDan Williams */
22030d9e7340SDan Williams for (i = 0; i < p->interleave_ways; i++)
22040d9e7340SDan Williams detach_target(cxlr, i);
22050d9e7340SDan Williams
220623a22cd1SDan Williams cxl_region_iomem_release(cxlr);
220723a22cd1SDan Williams put_device(dev);
2208779dd20cSBen Widawsky }
2209779dd20cSBen Widawsky
2210779dd20cSBen Widawsky static struct lock_class_key cxl_region_key;
2211779dd20cSBen Widawsky
2212779dd20cSBen Widawsky static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
2213779dd20cSBen Widawsky {
2214779dd20cSBen Widawsky struct cxl_region *cxlr;
2215779dd20cSBen Widawsky struct device *dev;
2216779dd20cSBen Widawsky
2217779dd20cSBen Widawsky cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
2218779dd20cSBen Widawsky if (!cxlr) {
2219779dd20cSBen Widawsky memregion_free(id);
2220779dd20cSBen Widawsky return ERR_PTR(-ENOMEM);
2221779dd20cSBen Widawsky }
2222779dd20cSBen Widawsky
2223779dd20cSBen Widawsky dev = &cxlr->dev;
2224779dd20cSBen Widawsky device_initialize(dev);
2225779dd20cSBen Widawsky lockdep_set_class(&dev->mutex, &cxl_region_key);
2226779dd20cSBen Widawsky dev->parent = &cxlrd->cxlsd.cxld.dev;
22278f401ec1SDan Williams /*
22288f401ec1SDan Williams * Keep root decoder pinned through cxl_region_release to fixup
22298f401ec1SDan Williams * region id allocations
22308f401ec1SDan Williams */
22318f401ec1SDan Williams get_device(dev->parent);
2232779dd20cSBen Widawsky device_set_pm_not_required(dev);
2233779dd20cSBen Widawsky dev->bus = &cxl_bus_type;
2234779dd20cSBen Widawsky dev->type = &cxl_region_type;
2235779dd20cSBen Widawsky cxlr->id = id;
2236779dd20cSBen Widawsky
2237779dd20cSBen Widawsky return cxlr;
2238779dd20cSBen Widawsky }
2239779dd20cSBen Widawsky
2240779dd20cSBen Widawsky /**
2241779dd20cSBen Widawsky * devm_cxl_add_region - Adds a region to a decoder
2242779dd20cSBen Widawsky * @cxlrd: root decoder
2243779dd20cSBen Widawsky  * @id: memregion id to create; released via memregion_free() on failure
2244779dd20cSBen Widawsky * @mode: mode for the endpoint decoders of this region
2245779dd20cSBen Widawsky * @type: select whether this is an expander or accelerator (type-2 or type-3)
2246779dd20cSBen Widawsky *
2247779dd20cSBen Widawsky * This is the second step of region initialization. Regions exist within an
2248779dd20cSBen Widawsky * address space which is mapped by a @cxlrd.
2249779dd20cSBen Widawsky *
2250779dd20cSBen Widawsky  * Return: the new region on success, or an ERR_PTR() encoded error on
2251779dd20cSBen Widawsky  * failure. The region will be named "regionZ" where Z is the unique region number.
2252779dd20cSBen Widawsky */
2253779dd20cSBen Widawsky static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
2254779dd20cSBen Widawsky int id,
2255779dd20cSBen Widawsky enum cxl_decoder_mode mode,
2256779dd20cSBen Widawsky enum cxl_decoder_type type)
2257779dd20cSBen Widawsky {
2258779dd20cSBen Widawsky struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
2259779dd20cSBen Widawsky struct cxl_region *cxlr;
2260779dd20cSBen Widawsky struct device *dev;
2261779dd20cSBen Widawsky int rc;
2262779dd20cSBen Widawsky
2263779dd20cSBen Widawsky cxlr = cxl_region_alloc(cxlrd, id);
2264779dd20cSBen Widawsky if (IS_ERR(cxlr))
2265779dd20cSBen Widawsky return cxlr;
2266779dd20cSBen Widawsky cxlr->mode = mode;
2267779dd20cSBen Widawsky cxlr->type = type;
2268779dd20cSBen Widawsky
2269779dd20cSBen Widawsky dev = &cxlr->dev;
2270779dd20cSBen Widawsky rc = dev_set_name(dev, "region%d", id);
2271779dd20cSBen Widawsky if (rc)
2272779dd20cSBen Widawsky goto err;
2273779dd20cSBen Widawsky
2274779dd20cSBen Widawsky rc = device_add(dev);
2275779dd20cSBen Widawsky if (rc)
2276779dd20cSBen Widawsky goto err;
2277779dd20cSBen Widawsky
22787481653dSDan Williams rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
2279779dd20cSBen Widawsky if (rc)
2280779dd20cSBen Widawsky return ERR_PTR(rc);
2281779dd20cSBen Widawsky
22827481653dSDan Williams dev_dbg(port->uport_dev, "%s: created %s\n",
2283779dd20cSBen Widawsky dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
2284779dd20cSBen Widawsky return cxlr;
2285779dd20cSBen Widawsky
2286779dd20cSBen Widawsky err:
2287779dd20cSBen Widawsky put_device(dev);
2288779dd20cSBen Widawsky return ERR_PTR(rc);
2289779dd20cSBen Widawsky }
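/*
 * Minimal caller sketch (informational, mirroring __create_region() below);
 * the id is assumed to have already been claimed via memregion_alloc():
 *
 *	struct cxl_region *cxlr;
 *
 *	cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
 *				   CXL_DECODER_HOSTONLYMEM);
 *	if (IS_ERR(cxlr))
 *		return PTR_ERR(cxlr);
 */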
2290779dd20cSBen Widawsky
22916e099264SDan Williams static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
22926e099264SDan Williams {
22936e099264SDan Williams return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
22946e099264SDan Williams }
22956e099264SDan Williams
2296779dd20cSBen Widawsky static ssize_t create_pmem_region_show(struct device *dev,
2297779dd20cSBen Widawsky struct device_attribute *attr, char *buf)
2298779dd20cSBen Widawsky {
22996e099264SDan Williams return __create_region_show(to_cxl_root_decoder(dev), buf);
23006e099264SDan Williams }
2301779dd20cSBen Widawsky
23026e099264SDan Williams static ssize_t create_ram_region_show(struct device *dev,
23036e099264SDan Williams struct device_attribute *attr, char *buf)
23046e099264SDan Williams {
23056e099264SDan Williams return __create_region_show(to_cxl_root_decoder(dev), buf);
23066e099264SDan Williams }
23076e099264SDan Williams
23086e099264SDan Williams static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
23096e099264SDan Williams enum cxl_decoder_mode mode, int id)
23106e099264SDan Williams {
23116e099264SDan Williams int rc;
23126e099264SDan Williams
2313d8316838SLi Zhijian switch (mode) {
2314d8316838SLi Zhijian case CXL_DECODER_RAM:
2315d8316838SLi Zhijian case CXL_DECODER_PMEM:
2316d8316838SLi Zhijian break;
2317d8316838SLi Zhijian default:
2318d8316838SLi Zhijian dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
2319d8316838SLi Zhijian return ERR_PTR(-EINVAL);
2320d8316838SLi Zhijian }
2321d8316838SLi Zhijian
23226e099264SDan Williams rc = memregion_alloc(GFP_KERNEL);
23236e099264SDan Williams if (rc < 0)
23246e099264SDan Williams return ERR_PTR(rc);
23256e099264SDan Williams
23266e099264SDan Williams if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
23276e099264SDan Williams memregion_free(rc);
23286e099264SDan Williams return ERR_PTR(-EBUSY);
23296e099264SDan Williams }
23306e099264SDan Williams
23315aa39a91SDan Williams return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
2332779dd20cSBen Widawsky }
2333779dd20cSBen Widawsky
2334779dd20cSBen Widawsky static ssize_t create_pmem_region_store(struct device *dev,
2335779dd20cSBen Widawsky struct device_attribute *attr,
2336779dd20cSBen Widawsky const char *buf, size_t len)
2337779dd20cSBen Widawsky {
2338779dd20cSBen Widawsky struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2339779dd20cSBen Widawsky struct cxl_region *cxlr;
23406e099264SDan Williams int rc, id;
2341779dd20cSBen Widawsky
2342779dd20cSBen Widawsky rc = sscanf(buf, "region%d\n", &id);
2343779dd20cSBen Widawsky if (rc != 1)
2344779dd20cSBen Widawsky return -EINVAL;
2345779dd20cSBen Widawsky
23466e099264SDan Williams cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
2347779dd20cSBen Widawsky if (IS_ERR(cxlr))
2348779dd20cSBen Widawsky return PTR_ERR(cxlr);
2349779dd20cSBen Widawsky
2350779dd20cSBen Widawsky return len;
2351779dd20cSBen Widawsky }
2352779dd20cSBen Widawsky DEVICE_ATTR_RW(create_pmem_region);
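/*
 * Example userspace flow (paths are illustrative): read the attribute to
 * learn the next region id cached in the root decoder, then write that
 * same name back to claim it. A racing writer that presents a stale id
 * loses the atomic_cmpxchg() in __create_region() and gets -EBUSY.
 *
 *	# cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *	region0
 *	# echo region0 > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 */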
2353779dd20cSBen Widawsky
23546e099264SDan Williams static ssize_t create_ram_region_store(struct device *dev,
23556e099264SDan Williams struct device_attribute *attr,
23566e099264SDan Williams const char *buf, size_t len)
23576e099264SDan Williams {
23586e099264SDan Williams struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
23596e099264SDan Williams struct cxl_region *cxlr;
23606e099264SDan Williams int rc, id;
23616e099264SDan Williams
23626e099264SDan Williams rc = sscanf(buf, "region%d\n", &id);
23636e099264SDan Williams if (rc != 1)
23646e099264SDan Williams return -EINVAL;
23656e099264SDan Williams
23666e099264SDan Williams cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
23676e099264SDan Williams if (IS_ERR(cxlr))
23686e099264SDan Williams return PTR_ERR(cxlr);
23696e099264SDan Williams
23706e099264SDan Williams return len;
23716e099264SDan Williams }
23726e099264SDan Williams DEVICE_ATTR_RW(create_ram_region);
23736e099264SDan Williams
2374b9686e8cSDan Williams static ssize_t region_show(struct device *dev, struct device_attribute *attr,
2375b9686e8cSDan Williams char *buf)
2376b9686e8cSDan Williams {
2377b9686e8cSDan Williams struct cxl_decoder *cxld = to_cxl_decoder(dev);
2378b9686e8cSDan Williams ssize_t rc;
2379b9686e8cSDan Williams
2380b9686e8cSDan Williams rc = down_read_interruptible(&cxl_region_rwsem);
2381b9686e8cSDan Williams if (rc)
2382b9686e8cSDan Williams return rc;
2383b9686e8cSDan Williams
2384b9686e8cSDan Williams if (cxld->region)
2385b9686e8cSDan Williams rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
2386b9686e8cSDan Williams else
2387b9686e8cSDan Williams rc = sysfs_emit(buf, "\n");
2388b9686e8cSDan Williams up_read(&cxl_region_rwsem);
2389b9686e8cSDan Williams
2390b9686e8cSDan Williams return rc;
2391b9686e8cSDan Williams }
2392b9686e8cSDan Williams DEVICE_ATTR_RO(region);
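/*
 * Example (illustrative path): reading a decoder's 'region' attribute
 * reports the region the decoder is currently assigned to, or an empty
 * line if it is idle.
 *
 *	# cat /sys/bus/cxl/devices/decoder3.0/region
 *	region0
 */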
2393b9686e8cSDan Williams
2394779dd20cSBen Widawsky static struct cxl_region *
2395779dd20cSBen Widawsky cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
2396779dd20cSBen Widawsky {
2397779dd20cSBen Widawsky struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
2398779dd20cSBen Widawsky struct device *region_dev;
2399779dd20cSBen Widawsky
2400779dd20cSBen Widawsky region_dev = device_find_child_by_name(&cxld->dev, name);
2401779dd20cSBen Widawsky if (!region_dev)
2402779dd20cSBen Widawsky return ERR_PTR(-ENODEV);
2403779dd20cSBen Widawsky
2404779dd20cSBen Widawsky return to_cxl_region(region_dev);
2405779dd20cSBen Widawsky }
2406779dd20cSBen Widawsky
2407779dd20cSBen Widawsky static ssize_t delete_region_store(struct device *dev,
2408779dd20cSBen Widawsky struct device_attribute *attr,
2409779dd20cSBen Widawsky const char *buf, size_t len)
2410779dd20cSBen Widawsky {
2411779dd20cSBen Widawsky struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2412779dd20cSBen Widawsky struct cxl_port *port = to_cxl_port(dev->parent);
2413779dd20cSBen Widawsky struct cxl_region *cxlr;
2414779dd20cSBen Widawsky
2415779dd20cSBen Widawsky cxlr = cxl_find_region_by_name(cxlrd, buf);
2416779dd20cSBen Widawsky if (IS_ERR(cxlr))
2417779dd20cSBen Widawsky return PTR_ERR(cxlr);
2418779dd20cSBen Widawsky
24197481653dSDan Williams devm_release_action(port->uport_dev, unregister_region, cxlr);
2420779dd20cSBen Widawsky put_device(&cxlr->dev);
2421779dd20cSBen Widawsky
2422779dd20cSBen Widawsky return len;
2423779dd20cSBen Widawsky }
2424779dd20cSBen Widawsky DEVICE_ATTR_WO(delete_region);
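/*
 * Example (illustrative path): a region is deleted by writing its name to
 * the root decoder that hosts it.
 *
 *	# echo region0 > /sys/bus/cxl/devices/decoder0.0/delete_region
 */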
242523a22cd1SDan Williams
242604ad63f0SDan Williams static void cxl_pmem_region_release(struct device *dev)
242704ad63f0SDan Williams {
242804ad63f0SDan Williams struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
242904ad63f0SDan Williams int i;
243004ad63f0SDan Williams
243104ad63f0SDan Williams for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
243204ad63f0SDan Williams struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
243304ad63f0SDan Williams
243404ad63f0SDan Williams put_device(&cxlmd->dev);
243504ad63f0SDan Williams }
243604ad63f0SDan Williams
243704ad63f0SDan Williams kfree(cxlr_pmem);
243804ad63f0SDan Williams }
243904ad63f0SDan Williams
244004ad63f0SDan Williams static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
244104ad63f0SDan Williams &cxl_base_attribute_group,
244204ad63f0SDan Williams NULL,
244304ad63f0SDan Williams };
244404ad63f0SDan Williams
244504ad63f0SDan Williams const struct device_type cxl_pmem_region_type = {
244604ad63f0SDan Williams .name = "cxl_pmem_region",
244704ad63f0SDan Williams .release = cxl_pmem_region_release,
244804ad63f0SDan Williams .groups = cxl_pmem_region_attribute_groups,
244904ad63f0SDan Williams };
245004ad63f0SDan Williams
245104ad63f0SDan Williams bool is_cxl_pmem_region(struct device *dev)
245204ad63f0SDan Williams {
245304ad63f0SDan Williams return dev->type == &cxl_pmem_region_type;
245404ad63f0SDan Williams }
245504ad63f0SDan Williams EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
245604ad63f0SDan Williams
245704ad63f0SDan Williams struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
245804ad63f0SDan Williams {
245904ad63f0SDan Williams if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
246004ad63f0SDan Williams "not a cxl_pmem_region device\n"))
246104ad63f0SDan Williams return NULL;
246204ad63f0SDan Williams return container_of(dev, struct cxl_pmem_region, dev);
246304ad63f0SDan Williams }
246404ad63f0SDan Williams EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
246504ad63f0SDan Williams
2466f0832a58SAlison Schofield struct cxl_poison_context {
2467f0832a58SAlison Schofield struct cxl_port *port;
2468f0832a58SAlison Schofield enum cxl_decoder_mode mode;
2469f0832a58SAlison Schofield u64 offset;
2470f0832a58SAlison Schofield };
2471f0832a58SAlison Schofield
2472f0832a58SAlison Schofield static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
2473f0832a58SAlison Schofield struct cxl_poison_context *ctx)
2474f0832a58SAlison Schofield {
2475f0832a58SAlison Schofield struct cxl_dev_state *cxlds = cxlmd->cxlds;
2476f0832a58SAlison Schofield u64 offset, length;
2477f0832a58SAlison Schofield int rc = 0;
2478f0832a58SAlison Schofield
2479f0832a58SAlison Schofield /*
2480f0832a58SAlison Schofield * Collect poison for the remaining unmapped resources
2481f0832a58SAlison Schofield * after poison is collected by committed endpoints.
2482f0832a58SAlison Schofield *
2483f0832a58SAlison Schofield * Knowing that PMEM must always follow RAM, get poison
2484f0832a58SAlison Schofield * for unmapped resources based on the last decoder's mode:
2485f0832a58SAlison Schofield * ram: scan remains of ram range, then any pmem range
2486f0832a58SAlison Schofield * pmem: scan remains of pmem range
2487f0832a58SAlison Schofield */
2488f0832a58SAlison Schofield
2489f0832a58SAlison Schofield if (ctx->mode == CXL_DECODER_RAM) {
2490f0832a58SAlison Schofield offset = ctx->offset;
2491f0832a58SAlison Schofield length = resource_size(&cxlds->ram_res) - offset;
2492f0832a58SAlison Schofield rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
2493f0832a58SAlison Schofield if (rc == -EFAULT)
2494f0832a58SAlison Schofield rc = 0;
2495f0832a58SAlison Schofield if (rc)
2496f0832a58SAlison Schofield return rc;
2497f0832a58SAlison Schofield }
2498f0832a58SAlison Schofield if (ctx->mode == CXL_DECODER_PMEM) {
2499f0832a58SAlison Schofield offset = ctx->offset;
2500f0832a58SAlison Schofield length = resource_size(&cxlds->dpa_res) - offset;
2501f0832a58SAlison Schofield if (!length)
2502f0832a58SAlison Schofield return 0;
2503f0832a58SAlison Schofield } else if (resource_size(&cxlds->pmem_res)) {
2504f0832a58SAlison Schofield offset = cxlds->pmem_res.start;
2505f0832a58SAlison Schofield length = resource_size(&cxlds->pmem_res);
2506f0832a58SAlison Schofield } else {
2507f0832a58SAlison Schofield return 0;
2508f0832a58SAlison Schofield }
2509f0832a58SAlison Schofield
2510f0832a58SAlison Schofield return cxl_mem_get_poison(cxlmd, offset, length, NULL);
2511f0832a58SAlison Schofield }
2512f0832a58SAlison Schofield
2513f0832a58SAlison Schofield static int poison_by_decoder(struct device *dev, void *arg)
2514f0832a58SAlison Schofield {
2515f0832a58SAlison Schofield struct cxl_poison_context *ctx = arg;
2516f0832a58SAlison Schofield struct cxl_endpoint_decoder *cxled;
2517f0832a58SAlison Schofield struct cxl_memdev *cxlmd;
2518f0832a58SAlison Schofield u64 offset, length;
2519f0832a58SAlison Schofield int rc = 0;
2520f0832a58SAlison Schofield
2521f0832a58SAlison Schofield if (!is_endpoint_decoder(dev))
2522f0832a58SAlison Schofield return rc;
2523f0832a58SAlison Schofield
2524f0832a58SAlison Schofield cxled = to_cxl_endpoint_decoder(dev);
2525f0832a58SAlison Schofield if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
2526f0832a58SAlison Schofield return rc;
2527f0832a58SAlison Schofield
2528f0832a58SAlison Schofield /*
2529f0832a58SAlison Schofield * Regions are only created with single mode decoders: pmem or ram.
2530f0832a58SAlison Schofield * Linux does not support mixed mode decoders. This means that
2531f0832a58SAlison Schofield * reading poison per endpoint decoder adheres to the requirement
2532f0832a58SAlison Schofield * that poison reads of pmem and ram must be separated.
2533f0832a58SAlison Schofield * CXL 3.0 Spec 8.2.9.8.4.1
2534f0832a58SAlison Schofield */
2535f0832a58SAlison Schofield if (cxled->mode == CXL_DECODER_MIXED) {
2536f0832a58SAlison Schofield dev_dbg(dev, "poison list read unsupported in mixed mode\n");
2537f0832a58SAlison Schofield return rc;
2538f0832a58SAlison Schofield }
2539f0832a58SAlison Schofield
2540f0832a58SAlison Schofield cxlmd = cxled_to_memdev(cxled);
2541f0832a58SAlison Schofield if (cxled->skip) {
2542f0832a58SAlison Schofield offset = cxled->dpa_res->start - cxled->skip;
2543f0832a58SAlison Schofield length = cxled->skip;
2544f0832a58SAlison Schofield rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
2545f0832a58SAlison Schofield if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
2546f0832a58SAlison Schofield rc = 0;
2547f0832a58SAlison Schofield if (rc)
2548f0832a58SAlison Schofield return rc;
2549f0832a58SAlison Schofield }
2550f0832a58SAlison Schofield
2551f0832a58SAlison Schofield offset = cxled->dpa_res->start;
2552f0832a58SAlison Schofield length = cxled->dpa_res->end - offset + 1;
2553f0832a58SAlison Schofield rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
2554f0832a58SAlison Schofield if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
2555f0832a58SAlison Schofield rc = 0;
2556f0832a58SAlison Schofield if (rc)
2557f0832a58SAlison Schofield return rc;
2558f0832a58SAlison Schofield
2559f0832a58SAlison Schofield /* Iterate until commit_end is reached */
2560f0832a58SAlison Schofield if (cxled->cxld.id == ctx->port->commit_end) {
2561f0832a58SAlison Schofield ctx->offset = cxled->dpa_res->end + 1;
2562f0832a58SAlison Schofield ctx->mode = cxled->mode;
2563f0832a58SAlison Schofield return 1;
2564f0832a58SAlison Schofield }
2565f0832a58SAlison Schofield
2566f0832a58SAlison Schofield return 0;
2567f0832a58SAlison Schofield }
2568f0832a58SAlison Schofield
2569f0832a58SAlison Schofield int cxl_get_poison_by_endpoint(struct cxl_port *port)
2570f0832a58SAlison Schofield {
2571f0832a58SAlison Schofield struct cxl_poison_context ctx;
2572f0832a58SAlison Schofield int rc = 0;
2573f0832a58SAlison Schofield
2574f0832a58SAlison Schofield ctx = (struct cxl_poison_context) {
2575f0832a58SAlison Schofield .port = port
2576f0832a58SAlison Schofield };
2577f0832a58SAlison Schofield
2578f0832a58SAlison Schofield rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
2579f0832a58SAlison Schofield if (rc == 1)
25807481653dSDan Williams rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
25817481653dSDan Williams &ctx);
2582f0832a58SAlison Schofield
2583f0832a58SAlison Schofield return rc;
2584f0832a58SAlison Schofield }
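/*
 * Caller sketch (informational; assumes the caller already holds the locks
 * that keep decoder and region state stable while the child walk runs):
 *
 *	struct cxl_port *port = cxlmd->endpoint;
 *	int rc = 0;
 *
 *	if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
 *		rc = cxl_get_poison_by_endpoint(port);
 */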
2585f0832a58SAlison Schofield
2586f12be1a1SAlison Schofield struct cxl_dpa_to_region_context {
2587f12be1a1SAlison Schofield struct cxl_region *cxlr;
2588f12be1a1SAlison Schofield u64 dpa;
2589f12be1a1SAlison Schofield };
2590f12be1a1SAlison Schofield
2591f12be1a1SAlison Schofield static int __cxl_dpa_to_region(struct device *dev, void *arg)
2592f12be1a1SAlison Schofield {
2593f12be1a1SAlison Schofield struct cxl_dpa_to_region_context *ctx = arg;
2594f12be1a1SAlison Schofield struct cxl_endpoint_decoder *cxled;
2595a9e099e2SAlison Schofield struct cxl_region *cxlr;
2596f12be1a1SAlison Schofield u64 dpa = ctx->dpa;
2597f12be1a1SAlison Schofield
2598f12be1a1SAlison Schofield if (!is_endpoint_decoder(dev))
2599f12be1a1SAlison Schofield return 0;
2600f12be1a1SAlison Schofield
2601f12be1a1SAlison Schofield cxled = to_cxl_endpoint_decoder(dev);
2602a9e099e2SAlison Schofield if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
2603f12be1a1SAlison Schofield return 0;
2604f12be1a1SAlison Schofield
2605f12be1a1SAlison Schofield if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
2606f12be1a1SAlison Schofield return 0;
2607f12be1a1SAlison Schofield
2608a9e099e2SAlison Schofield /*
2609a9e099e2SAlison Schofield * Stop the region search (return 1) when an endpoint mapping is
2610a9e099e2SAlison Schofield 	 * found. The region may not be fully constructed yet, so the cxlr
2611a9e099e2SAlison Schofield 	 * recorded in the context structure may still be NULL.
2612a9e099e2SAlison Schofield */
2613a9e099e2SAlison Schofield cxlr = cxled->cxld.region;
2614a9e099e2SAlison Schofield if (cxlr)
2615f12be1a1SAlison Schofield dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
2616a9e099e2SAlison Schofield dev_name(&cxlr->dev));
2617a9e099e2SAlison Schofield else
2618a9e099e2SAlison Schofield dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa,
2619a9e099e2SAlison Schofield dev_name(dev));
2620f12be1a1SAlison Schofield
2621a9e099e2SAlison Schofield ctx->cxlr = cxlr;
2622f12be1a1SAlison Schofield
2623f12be1a1SAlison Schofield return 1;
2624f12be1a1SAlison Schofield }
2625f12be1a1SAlison Schofield
2626f12be1a1SAlison Schofield struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
2627f12be1a1SAlison Schofield {
2628f12be1a1SAlison Schofield struct cxl_dpa_to_region_context ctx;
2629f12be1a1SAlison Schofield struct cxl_port *port;
2630f12be1a1SAlison Schofield
2631f12be1a1SAlison Schofield ctx = (struct cxl_dpa_to_region_context) {
2632f12be1a1SAlison Schofield .dpa = dpa,
2633f12be1a1SAlison Schofield };
2634f12be1a1SAlison Schofield port = cxlmd->endpoint;
2635f12be1a1SAlison Schofield if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
2636f12be1a1SAlison Schofield device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
2637f12be1a1SAlison Schofield
2638f12be1a1SAlison Schofield return ctx.cxlr;
2639f12be1a1SAlison Schofield }
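/*
 * Caller sketch (hypothetical error handler): translate a device physical
 * address reported by the device into the region, if any, that maps it.
 *
 *	struct cxl_region *cxlr = cxl_dpa_to_region(cxlmd, dpa);
 *
 *	if (cxlr)
 *		dev_dbg(&cxlr->dev, "dpa %#llx maps into this region\n", dpa);
 */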
2640f12be1a1SAlison Schofield
264104ad63f0SDan Williams static struct lock_class_key cxl_pmem_region_key;
264204ad63f0SDan Williams
264304ad63f0SDan Williams static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
264404ad63f0SDan Williams {
264504ad63f0SDan Williams struct cxl_region_params *p = &cxlr->params;
2646f17b558dSDan Williams struct cxl_nvdimm_bridge *cxl_nvb;
264704ad63f0SDan Williams struct cxl_pmem_region *cxlr_pmem;
264804ad63f0SDan Williams struct device *dev;
264904ad63f0SDan Williams int i;
265004ad63f0SDan Williams
265104ad63f0SDan Williams down_read(&cxl_region_rwsem);
265204ad63f0SDan Williams if (p->state != CXL_CONFIG_COMMIT) {
265304ad63f0SDan Williams cxlr_pmem = ERR_PTR(-ENXIO);
265404ad63f0SDan Williams goto out;
265504ad63f0SDan Williams }
265604ad63f0SDan Williams
265704ad63f0SDan Williams cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
265804ad63f0SDan Williams GFP_KERNEL);
265904ad63f0SDan Williams if (!cxlr_pmem) {
266004ad63f0SDan Williams cxlr_pmem = ERR_PTR(-ENOMEM);
266104ad63f0SDan Williams goto out;
266204ad63f0SDan Williams }
266304ad63f0SDan Williams
266404ad63f0SDan Williams cxlr_pmem->hpa_range.start = p->res->start;
266504ad63f0SDan Williams cxlr_pmem->hpa_range.end = p->res->end;
266604ad63f0SDan Williams
266704ad63f0SDan Williams /* Snapshot the region configuration underneath the cxl_region_rwsem */
266804ad63f0SDan Williams cxlr_pmem->nr_mappings = p->nr_targets;
266904ad63f0SDan Williams for (i = 0; i < p->nr_targets; i++) {
267004ad63f0SDan Williams struct cxl_endpoint_decoder *cxled = p->targets[i];
267104ad63f0SDan Williams struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
267204ad63f0SDan Williams struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
267304ad63f0SDan Williams
2674f17b558dSDan Williams /*
2675f17b558dSDan Williams * Regions never span CXL root devices, so by definition the
2676f17b558dSDan Williams * bridge for one device is the same for all.
2677f17b558dSDan Williams */
2678f17b558dSDan Williams if (i == 0) {
2679d35b495dSDan Williams cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
2680f17b558dSDan Williams if (!cxl_nvb) {
268124b9362cSLi Zhijian kfree(cxlr_pmem);
2682f17b558dSDan Williams cxlr_pmem = ERR_PTR(-ENODEV);
2683f17b558dSDan Williams goto out;
2684f17b558dSDan Williams }
2685f17b558dSDan Williams cxlr->cxl_nvb = cxl_nvb;
2686f17b558dSDan Williams }
268704ad63f0SDan Williams m->cxlmd = cxlmd;
268804ad63f0SDan Williams get_device(&cxlmd->dev);
268904ad63f0SDan Williams m->start = cxled->dpa_res->start;
269004ad63f0SDan Williams m->size = resource_size(cxled->dpa_res);
269104ad63f0SDan Williams m->position = i;
269204ad63f0SDan Williams }
269304ad63f0SDan Williams
269404ad63f0SDan Williams dev = &cxlr_pmem->dev;
269504ad63f0SDan Williams cxlr_pmem->cxlr = cxlr;
2696f17b558dSDan Williams cxlr->cxlr_pmem = cxlr_pmem;
269704ad63f0SDan Williams device_initialize(dev);
269804ad63f0SDan Williams lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
269904ad63f0SDan Williams device_set_pm_not_required(dev);
270004ad63f0SDan Williams dev->parent = &cxlr->dev;
270104ad63f0SDan Williams dev->bus = &cxl_bus_type;
270204ad63f0SDan Williams dev->type = &cxl_pmem_region_type;
270304ad63f0SDan Williams out:
270404ad63f0SDan Williams up_read(&cxl_region_rwsem);
270504ad63f0SDan Williams
270604ad63f0SDan Williams return cxlr_pmem;
270704ad63f0SDan Williams }
270804ad63f0SDan Williams
270909d09e04SDan Williams static void cxl_dax_region_release(struct device *dev)
271009d09e04SDan Williams {
271109d09e04SDan Williams struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
271209d09e04SDan Williams
271309d09e04SDan Williams kfree(cxlr_dax);
271409d09e04SDan Williams }
271509d09e04SDan Williams
271609d09e04SDan Williams static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
271709d09e04SDan Williams &cxl_base_attribute_group,
271809d09e04SDan Williams NULL,
271909d09e04SDan Williams };
272009d09e04SDan Williams
272109d09e04SDan Williams const struct device_type cxl_dax_region_type = {
272209d09e04SDan Williams .name = "cxl_dax_region",
272309d09e04SDan Williams .release = cxl_dax_region_release,
272409d09e04SDan Williams .groups = cxl_dax_region_attribute_groups,
272509d09e04SDan Williams };
272609d09e04SDan Williams
272709d09e04SDan Williams static bool is_cxl_dax_region(struct device *dev)
272809d09e04SDan Williams {
272909d09e04SDan Williams return dev->type == &cxl_dax_region_type;
273009d09e04SDan Williams }
273109d09e04SDan Williams
273209d09e04SDan Williams struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
273309d09e04SDan Williams {
273409d09e04SDan Williams if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
273509d09e04SDan Williams "not a cxl_dax_region device\n"))
273609d09e04SDan Williams return NULL;
273709d09e04SDan Williams return container_of(dev, struct cxl_dax_region, dev);
273809d09e04SDan Williams }
273909d09e04SDan Williams EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
274009d09e04SDan Williams
274109d09e04SDan Williams static struct lock_class_key cxl_dax_region_key;
274209d09e04SDan Williams
274309d09e04SDan Williams static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
274409d09e04SDan Williams {
274509d09e04SDan Williams struct cxl_region_params *p = &cxlr->params;
274609d09e04SDan Williams struct cxl_dax_region *cxlr_dax;
274709d09e04SDan Williams struct device *dev;
274809d09e04SDan Williams
274909d09e04SDan Williams down_read(&cxl_region_rwsem);
275009d09e04SDan Williams if (p->state != CXL_CONFIG_COMMIT) {
275109d09e04SDan Williams cxlr_dax = ERR_PTR(-ENXIO);
275209d09e04SDan Williams goto out;
275309d09e04SDan Williams }
275409d09e04SDan Williams
275509d09e04SDan Williams cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
275609d09e04SDan Williams if (!cxlr_dax) {
275709d09e04SDan Williams cxlr_dax = ERR_PTR(-ENOMEM);
275809d09e04SDan Williams goto out;
275909d09e04SDan Williams }
276009d09e04SDan Williams
276109d09e04SDan Williams cxlr_dax->hpa_range.start = p->res->start;
276209d09e04SDan Williams cxlr_dax->hpa_range.end = p->res->end;
276309d09e04SDan Williams
276409d09e04SDan Williams dev = &cxlr_dax->dev;
276509d09e04SDan Williams cxlr_dax->cxlr = cxlr;
276609d09e04SDan Williams device_initialize(dev);
276709d09e04SDan Williams lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
276809d09e04SDan Williams device_set_pm_not_required(dev);
276909d09e04SDan Williams dev->parent = &cxlr->dev;
277009d09e04SDan Williams dev->bus = &cxl_bus_type;
277109d09e04SDan Williams dev->type = &cxl_dax_region_type;
277209d09e04SDan Williams out:
277309d09e04SDan Williams up_read(&cxl_region_rwsem);
277409d09e04SDan Williams
277509d09e04SDan Williams return cxlr_dax;
277609d09e04SDan Williams }
277709d09e04SDan Williams
2778f17b558dSDan Williams static void cxlr_pmem_unregister(void *_cxlr_pmem)
277904ad63f0SDan Williams {
2780f17b558dSDan Williams struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
2781f17b558dSDan Williams struct cxl_region *cxlr = cxlr_pmem->cxlr;
2782f17b558dSDan Williams struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
2783f17b558dSDan Williams
2784f17b558dSDan Williams /*
2785f17b558dSDan Williams * Either the bridge is in ->remove() context under the device_lock(),
2786f17b558dSDan Williams * or cxlr_release_nvdimm() is cancelling the bridge's release action
2787f17b558dSDan Williams * for @cxlr_pmem and doing it itself (while manually holding the bridge
2788f17b558dSDan Williams * lock).
2789f17b558dSDan Williams */
2790f17b558dSDan Williams device_lock_assert(&cxl_nvb->dev);
2791f17b558dSDan Williams cxlr->cxlr_pmem = NULL;
2792f17b558dSDan Williams cxlr_pmem->cxlr = NULL;
2793f17b558dSDan Williams device_unregister(&cxlr_pmem->dev);
2794f17b558dSDan Williams }
2795f17b558dSDan Williams
2796f17b558dSDan Williams static void cxlr_release_nvdimm(void *_cxlr)
2797f17b558dSDan Williams {
2798f17b558dSDan Williams struct cxl_region *cxlr = _cxlr;
2799f17b558dSDan Williams struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
2800f17b558dSDan Williams
2801f17b558dSDan Williams device_lock(&cxl_nvb->dev);
2802f17b558dSDan Williams if (cxlr->cxlr_pmem)
2803f17b558dSDan Williams devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
2804f17b558dSDan Williams cxlr->cxlr_pmem);
2805f17b558dSDan Williams device_unlock(&cxl_nvb->dev);
2806f17b558dSDan Williams cxlr->cxl_nvb = NULL;
2807f17b558dSDan Williams put_device(&cxl_nvb->dev);
280804ad63f0SDan Williams }
280904ad63f0SDan Williams
281004ad63f0SDan Williams /**
281104ad63f0SDan Williams * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
281204ad63f0SDan Williams * @cxlr: parent CXL region for this pmem region bridge device
281304ad63f0SDan Williams *
281404ad63f0SDan Williams  * Return: 0 on success, negative error code on failure.
281504ad63f0SDan Williams */
281604ad63f0SDan Williams static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
281704ad63f0SDan Williams {
281804ad63f0SDan Williams struct cxl_pmem_region *cxlr_pmem;
2819f17b558dSDan Williams struct cxl_nvdimm_bridge *cxl_nvb;
282004ad63f0SDan Williams struct device *dev;
282104ad63f0SDan Williams int rc;
282204ad63f0SDan Williams
282304ad63f0SDan Williams cxlr_pmem = cxl_pmem_region_alloc(cxlr);
282404ad63f0SDan Williams if (IS_ERR(cxlr_pmem))
282504ad63f0SDan Williams return PTR_ERR(cxlr_pmem);
2826f17b558dSDan Williams cxl_nvb = cxlr->cxl_nvb;
282704ad63f0SDan Williams
282804ad63f0SDan Williams dev = &cxlr_pmem->dev;
282904ad63f0SDan Williams rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
283004ad63f0SDan Williams if (rc)
283104ad63f0SDan Williams goto err;
283204ad63f0SDan Williams
283304ad63f0SDan Williams rc = device_add(dev);
283404ad63f0SDan Williams if (rc)
283504ad63f0SDan Williams goto err;
283604ad63f0SDan Williams
283704ad63f0SDan Williams dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
283804ad63f0SDan Williams dev_name(dev));
283904ad63f0SDan Williams
2840f17b558dSDan Williams device_lock(&cxl_nvb->dev);
2841f17b558dSDan Williams if (cxl_nvb->dev.driver)
2842f17b558dSDan Williams rc = devm_add_action_or_reset(&cxl_nvb->dev,
2843f17b558dSDan Williams cxlr_pmem_unregister, cxlr_pmem);
2844f17b558dSDan Williams else
2845f17b558dSDan Williams rc = -ENXIO;
2846f17b558dSDan Williams device_unlock(&cxl_nvb->dev);
2847f17b558dSDan Williams
2848f17b558dSDan Williams if (rc)
2849f17b558dSDan Williams goto err_bridge;
2850f17b558dSDan Williams
2851f17b558dSDan Williams /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
2852f17b558dSDan Williams return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
285304ad63f0SDan Williams
285404ad63f0SDan Williams err:
285504ad63f0SDan Williams put_device(dev);
2856f17b558dSDan Williams err_bridge:
2857f17b558dSDan Williams put_device(&cxl_nvb->dev);
2858f17b558dSDan Williams cxlr->cxl_nvb = NULL;
285904ad63f0SDan Williams return rc;
286004ad63f0SDan Williams }
286104ad63f0SDan Williams
286209d09e04SDan Williams static void cxlr_dax_unregister(void *_cxlr_dax)
286309d09e04SDan Williams {
286409d09e04SDan Williams struct cxl_dax_region *cxlr_dax = _cxlr_dax;
286509d09e04SDan Williams
286609d09e04SDan Williams device_unregister(&cxlr_dax->dev);
286709d09e04SDan Williams }
286809d09e04SDan Williams
286909d09e04SDan Williams static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
287009d09e04SDan Williams {
287109d09e04SDan Williams struct cxl_dax_region *cxlr_dax;
287209d09e04SDan Williams struct device *dev;
287309d09e04SDan Williams int rc;
287409d09e04SDan Williams
287509d09e04SDan Williams cxlr_dax = cxl_dax_region_alloc(cxlr);
287609d09e04SDan Williams if (IS_ERR(cxlr_dax))
287709d09e04SDan Williams return PTR_ERR(cxlr_dax);
287809d09e04SDan Williams
287909d09e04SDan Williams dev = &cxlr_dax->dev;
288009d09e04SDan Williams rc = dev_set_name(dev, "dax_region%d", cxlr->id);
288109d09e04SDan Williams if (rc)
288209d09e04SDan Williams goto err;
288309d09e04SDan Williams
288409d09e04SDan Williams rc = device_add(dev);
288509d09e04SDan Williams if (rc)
288609d09e04SDan Williams goto err;
288709d09e04SDan Williams
288809d09e04SDan Williams dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
288909d09e04SDan Williams dev_name(dev));
289009d09e04SDan Williams
289109d09e04SDan Williams return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
289209d09e04SDan Williams cxlr_dax);
289309d09e04SDan Williams err:
289409d09e04SDan Williams put_device(dev);
289509d09e04SDan Williams return rc;
289609d09e04SDan Williams }
289709d09e04SDan Williams
2898c4255b9bSAlison Schofield static int match_root_decoder_by_range(struct device *dev, void *data)
2899a32320b7SDan Williams {
2900a32320b7SDan Williams struct range *r1, *r2 = data;
2901a32320b7SDan Williams struct cxl_root_decoder *cxlrd;
2902a32320b7SDan Williams
2903a32320b7SDan Williams if (!is_root_decoder(dev))
2904a32320b7SDan Williams return 0;
2905a32320b7SDan Williams
2906a32320b7SDan Williams cxlrd = to_cxl_root_decoder(dev);
2907a32320b7SDan Williams r1 = &cxlrd->cxlsd.cxld.hpa_range;
2908a32320b7SDan Williams return range_contains(r1, r2);
2909a32320b7SDan Williams }
2910a32320b7SDan Williams
2911a32320b7SDan Williams static int match_region_by_range(struct device *dev, void *data)
2912a32320b7SDan Williams {
2913a32320b7SDan Williams struct cxl_region_params *p;
2914a32320b7SDan Williams struct cxl_region *cxlr;
2915a32320b7SDan Williams struct range *r = data;
2916a32320b7SDan Williams int rc = 0;
2917a32320b7SDan Williams
2918a32320b7SDan Williams if (!is_cxl_region(dev))
2919a32320b7SDan Williams return 0;
2920a32320b7SDan Williams
2921a32320b7SDan Williams cxlr = to_cxl_region(dev);
2922a32320b7SDan Williams p = &cxlr->params;
2923a32320b7SDan Williams
2924a32320b7SDan Williams down_read(&cxl_region_rwsem);
2925a32320b7SDan Williams if (p->res && p->res->start == r->start && p->res->end == r->end)
2926a32320b7SDan Williams rc = 1;
2927a32320b7SDan Williams up_read(&cxl_region_rwsem);
2928a32320b7SDan Williams
2929a32320b7SDan Williams return rc;
2930a32320b7SDan Williams }
2931a32320b7SDan Williams
2932a32320b7SDan Williams /* Establish an empty region covering the given HPA range */
2933a32320b7SDan Williams static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
2934a32320b7SDan Williams struct cxl_endpoint_decoder *cxled)
2935a32320b7SDan Williams {
2936a32320b7SDan Williams struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2937a32320b7SDan Williams struct cxl_port *port = cxlrd_to_port(cxlrd);
2938a32320b7SDan Williams struct range *hpa = &cxled->cxld.hpa_range;
2939a32320b7SDan Williams struct cxl_region_params *p;
2940a32320b7SDan Williams struct cxl_region *cxlr;
2941a32320b7SDan Williams struct resource *res;
2942a32320b7SDan Williams int rc;
2943a32320b7SDan Williams
2944a32320b7SDan Williams do {
2945a32320b7SDan Williams cxlr = __create_region(cxlrd, cxled->mode,
2946a32320b7SDan Williams atomic_read(&cxlrd->region_id));
2947a32320b7SDan Williams } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
2948a32320b7SDan Williams
2949a32320b7SDan Williams if (IS_ERR(cxlr)) {
2950a32320b7SDan Williams dev_err(cxlmd->dev.parent,
2951a32320b7SDan Williams "%s:%s: %s failed assign region: %ld\n",
2952a32320b7SDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
2953a32320b7SDan Williams __func__, PTR_ERR(cxlr));
2954a32320b7SDan Williams return cxlr;
2955a32320b7SDan Williams }
2956a32320b7SDan Williams
2957a32320b7SDan Williams down_write(&cxl_region_rwsem);
2958a32320b7SDan Williams p = &cxlr->params;
2959a32320b7SDan Williams if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
2960a32320b7SDan Williams dev_err(cxlmd->dev.parent,
2961a32320b7SDan Williams "%s:%s: %s autodiscovery interrupted\n",
2962a32320b7SDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
2963a32320b7SDan Williams __func__);
2964a32320b7SDan Williams rc = -EBUSY;
2965a32320b7SDan Williams goto err;
2966a32320b7SDan Williams }
2967a32320b7SDan Williams
2968a32320b7SDan Williams set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
2969a32320b7SDan Williams
2970a32320b7SDan Williams res = kmalloc(sizeof(*res), GFP_KERNEL);
2971a32320b7SDan Williams if (!res) {
2972a32320b7SDan Williams rc = -ENOMEM;
2973a32320b7SDan Williams goto err;
2974a32320b7SDan Williams }
2975a32320b7SDan Williams
2976a32320b7SDan Williams *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
2977a32320b7SDan Williams dev_name(&cxlr->dev));
2978a32320b7SDan Williams rc = insert_resource(cxlrd->res, res);
2979a32320b7SDan Williams if (rc) {
2980a32320b7SDan Williams /*
2981a32320b7SDan Williams * Platform-firmware may not have split resources like "System
2982a32320b7SDan Williams 		 * RAM" on CXL window boundaries, see cxl_region_iomem_release()
2983a32320b7SDan Williams */
2984a32320b7SDan Williams dev_warn(cxlmd->dev.parent,
2985a32320b7SDan Williams "%s:%s: %s %s cannot insert resource\n",
2986a32320b7SDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
2987a32320b7SDan Williams __func__, dev_name(&cxlr->dev));
2988a32320b7SDan Williams }
2989a32320b7SDan Williams
2990a32320b7SDan Williams p->res = res;
2991a32320b7SDan Williams p->interleave_ways = cxled->cxld.interleave_ways;
2992a32320b7SDan Williams p->interleave_granularity = cxled->cxld.interleave_granularity;
2993a32320b7SDan Williams p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
2994a32320b7SDan Williams
2995a32320b7SDan Williams rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
2996a32320b7SDan Williams if (rc)
2997a32320b7SDan Williams goto err;
2998a32320b7SDan Williams
2999a32320b7SDan Williams dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
3000a32320b7SDan Williams dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
3001a32320b7SDan Williams dev_name(&cxlr->dev), p->res, p->interleave_ways,
3002a32320b7SDan Williams p->interleave_granularity);
3003a32320b7SDan Williams
3004a32320b7SDan Williams /* ...to match put_device() in cxl_add_to_region() */
3005a32320b7SDan Williams get_device(&cxlr->dev);
3006a32320b7SDan Williams up_write(&cxl_region_rwsem);
3007a32320b7SDan Williams
3008a32320b7SDan Williams return cxlr;
3009a32320b7SDan Williams
3010a32320b7SDan Williams err:
3011a32320b7SDan Williams up_write(&cxl_region_rwsem);
30127481653dSDan Williams devm_release_action(port->uport_dev, unregister_region, cxlr);
3013a32320b7SDan Williams return ERR_PTR(rc);
3014a32320b7SDan Williams }
3015a32320b7SDan Williams
3016a32320b7SDan Williams int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
3017a32320b7SDan Williams {
3018a32320b7SDan Williams struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
3019a32320b7SDan Williams struct range *hpa = &cxled->cxld.hpa_range;
3020a32320b7SDan Williams struct cxl_decoder *cxld = &cxled->cxld;
3021a32320b7SDan Williams struct device *cxlrd_dev, *region_dev;
3022a32320b7SDan Williams struct cxl_root_decoder *cxlrd;
3023a32320b7SDan Williams struct cxl_region_params *p;
3024a32320b7SDan Williams struct cxl_region *cxlr;
3025a32320b7SDan Williams bool attach = false;
3026a32320b7SDan Williams int rc;
3027a32320b7SDan Williams
3028a32320b7SDan Williams cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
3029c4255b9bSAlison Schofield match_root_decoder_by_range);
3030a32320b7SDan Williams if (!cxlrd_dev) {
3031a32320b7SDan Williams dev_err(cxlmd->dev.parent,
3032a32320b7SDan Williams "%s:%s no CXL window for range %#llx:%#llx\n",
3033a32320b7SDan Williams dev_name(&cxlmd->dev), dev_name(&cxld->dev),
3034a32320b7SDan Williams cxld->hpa_range.start, cxld->hpa_range.end);
3035a32320b7SDan Williams return -ENXIO;
3036a32320b7SDan Williams }
3037a32320b7SDan Williams
3038a32320b7SDan Williams cxlrd = to_cxl_root_decoder(cxlrd_dev);
3039a32320b7SDan Williams
3040a32320b7SDan Williams /*
3041a32320b7SDan Williams 	 * Ensure that if multiple threads race to construct_region() for @hpa,
3042a32320b7SDan Williams 	 * one does the construction and the others add to that.
3043a32320b7SDan Williams */
3044a32320b7SDan Williams mutex_lock(&cxlrd->range_lock);
3045a32320b7SDan Williams region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
3046a32320b7SDan Williams match_region_by_range);
3047a32320b7SDan Williams if (!region_dev) {
3048a32320b7SDan Williams cxlr = construct_region(cxlrd, cxled);
3049a32320b7SDan Williams region_dev = &cxlr->dev;
3050a32320b7SDan Williams } else
3051a32320b7SDan Williams cxlr = to_cxl_region(region_dev);
3052a32320b7SDan Williams mutex_unlock(&cxlrd->range_lock);
3053a32320b7SDan Williams
30547abcb0b1SArnd Bergmann rc = PTR_ERR_OR_ZERO(cxlr);
30557abcb0b1SArnd Bergmann if (rc)
3056a32320b7SDan Williams goto out;
3057a32320b7SDan Williams
3058a32320b7SDan Williams attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
3059a32320b7SDan Williams
3060a32320b7SDan Williams down_read(&cxl_region_rwsem);
3061a32320b7SDan Williams p = &cxlr->params;
3062a32320b7SDan Williams attach = p->state == CXL_CONFIG_COMMIT;
3063a32320b7SDan Williams up_read(&cxl_region_rwsem);
3064a32320b7SDan Williams
3065a32320b7SDan Williams if (attach) {
3066a32320b7SDan Williams /*
3067a32320b7SDan Williams 		 * If device_attach() fails, the range may still be active via
3068a32320b7SDan Williams 		 * the platform-firmware memory map; otherwise the driver for
3069a32320b7SDan Williams 		 * regions is local to this file, so driver matching can't fail.
3070a32320b7SDan Williams */
3071a32320b7SDan Williams if (device_attach(&cxlr->dev) < 0)
3072a32320b7SDan Williams dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
3073a32320b7SDan Williams p->res);
3074a32320b7SDan Williams }
3075a32320b7SDan Williams
3076a32320b7SDan Williams put_device(region_dev);
3077a32320b7SDan Williams out:
3078a32320b7SDan Williams put_device(cxlrd_dev);
3079a32320b7SDan Williams return rc;
3080a32320b7SDan Williams }
3081a32320b7SDan Williams EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
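/*
 * Caller sketch (informational): during endpoint enumeration, each committed
 * endpoint decoder whose HPA range was programmed by platform firmware can be
 * fed to cxl_add_to_region(); @root is assumed to have already been resolved
 * to the CXL root port for this endpoint's topology.
 *
 *	rc = cxl_add_to_region(root, cxled);
 *	if (rc)
 *		dev_dbg(&cxled->cxld.dev, "failed to add to region: %d\n", rc);
 */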
3082a32320b7SDan Williams
3083a32320b7SDan Williams static int is_system_ram(struct resource *res, void *arg)
3084a32320b7SDan Williams {
3085a32320b7SDan Williams struct cxl_region *cxlr = arg;
3086a32320b7SDan Williams struct cxl_region_params *p = &cxlr->params;
3087a32320b7SDan Williams
3088a32320b7SDan Williams dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
3089a32320b7SDan Williams return 1;
3090a32320b7SDan Williams }
3091a32320b7SDan Williams
30928d48817dSDan Williams static int cxl_region_probe(struct device *dev)
30938d48817dSDan Williams {
30948d48817dSDan Williams struct cxl_region *cxlr = to_cxl_region(dev);
30958d48817dSDan Williams struct cxl_region_params *p = &cxlr->params;
30968d48817dSDan Williams int rc;
30978d48817dSDan Williams
30988d48817dSDan Williams rc = down_read_interruptible(&cxl_region_rwsem);
30998d48817dSDan Williams if (rc) {
31008d48817dSDan Williams dev_dbg(&cxlr->dev, "probe interrupted\n");
31018d48817dSDan Williams return rc;
31028d48817dSDan Williams }
31038d48817dSDan Williams
31048d48817dSDan Williams if (p->state < CXL_CONFIG_COMMIT) {
31058d48817dSDan Williams dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
31068d48817dSDan Williams rc = -ENXIO;
3107d18bc74aSDan Williams goto out;
31088d48817dSDan Williams }
31098d48817dSDan Williams
31102ab47045SDan Williams if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
31112ab47045SDan Williams dev_err(&cxlr->dev,
31122ab47045SDan Williams "failed to activate, re-commit region and retry\n");
31132ab47045SDan Williams rc = -ENXIO;
31142ab47045SDan Williams goto out;
31152ab47045SDan Williams }
3116d18bc74aSDan Williams
31178d48817dSDan Williams /*
31188d48817dSDan Williams 	 * From this point on, any path that changes the region's state away from
31198d48817dSDan Williams * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
31208d48817dSDan Williams */
3121d18bc74aSDan Williams out:
31228d48817dSDan Williams up_read(&cxl_region_rwsem);
31238d48817dSDan Williams
3124bf3e5da8SDan Williams if (rc)
3125bf3e5da8SDan Williams return rc;
3126bf3e5da8SDan Williams
312704ad63f0SDan Williams switch (cxlr->mode) {
312804ad63f0SDan Williams case CXL_DECODER_PMEM:
312904ad63f0SDan Williams return devm_cxl_add_pmem_region(cxlr);
3130a32320b7SDan Williams case CXL_DECODER_RAM:
3131a32320b7SDan Williams /*
3132a32320b7SDan Williams 		 * The region cannot be managed by CXL if any portion of
3133a32320b7SDan Williams * it is already online as 'System RAM'
3134a32320b7SDan Williams */
3135a32320b7SDan Williams if (walk_iomem_res_desc(IORES_DESC_NONE,
3136a32320b7SDan Williams IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
3137a32320b7SDan Williams p->res->start, p->res->end, cxlr,
3138a32320b7SDan Williams is_system_ram) > 0)
3139a32320b7SDan Williams return 0;
314009d09e04SDan Williams return devm_cxl_add_dax_region(cxlr);
314104ad63f0SDan Williams default:
314204ad63f0SDan Williams dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
314304ad63f0SDan Williams cxlr->mode);
314404ad63f0SDan Williams return -ENXIO;
314504ad63f0SDan Williams }
31468d48817dSDan Williams }
31478d48817dSDan Williams
31488d48817dSDan Williams static struct cxl_driver cxl_region_driver = {
31498d48817dSDan Williams .name = "cxl_region",
31508d48817dSDan Williams .probe = cxl_region_probe,
31518d48817dSDan Williams .id = CXL_DEVICE_REGION,
31528d48817dSDan Williams };
31538d48817dSDan Williams
31548d48817dSDan Williams int cxl_region_init(void)
31558d48817dSDan Williams {
31568d48817dSDan Williams return cxl_driver_register(&cxl_region_driver);
31578d48817dSDan Williams }
31588d48817dSDan Williams
31598d48817dSDan Williams void cxl_region_exit(void)
31608d48817dSDan Williams {
31618d48817dSDan Williams cxl_driver_unregister(&cxl_region_driver);
31628d48817dSDan Williams }
31638d48817dSDan Williams
316423a22cd1SDan Williams MODULE_IMPORT_NS(CXL);
3165d18bc74aSDan Williams MODULE_IMPORT_NS(DEVMEM);
31668d48817dSDan Williams MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
3167