xref: /openbmc/linux/drivers/cxl/core/port.c (revision 7f7bed74)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
3 #include <linux/platform_device.h>
4 #include <linux/memregion.h>
5 #include <linux/workqueue.h>
6 #include <linux/debugfs.h>
7 #include <linux/device.h>
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 #include <linux/slab.h>
11 #include <linux/idr.h>
12 #include <cxlmem.h>
13 #include <cxlpci.h>
14 #include <cxl.h>
15 #include "core.h"
16 
17 /**
18  * DOC: cxl core
19  *
20  * The CXL core provides a set of interfaces that can be consumed by CXL-aware
21  * drivers. The interfaces allow for creation, modification, and destruction of
22  * regions, memory devices, ports, and decoders. CXL-aware drivers must register
23  * with the CXL core via these interfaces in order to participate in
24  * cross-device interleave coordination. The CXL core also establishes and
25  * maintains the bridge to the nvdimm subsystem.
26  *
27  * The CXL core introduces a sysfs hierarchy to control the devices that are
28  * instantiated by the core.
29  */
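/*
 * Illustrative note (not part of the upstream comment): the objects created
 * by this file surface under /sys/bus/cxl/devices/ using the names assigned
 * below, e.g. a platform root "root0", descendant switch ports "portN",
 * endpoint ports "endpointN" hanging off memdevs, and decoders named
 * "decoderX.Y" where X is the owning port id and Y the decoder id.
 */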
30 
31 /*
32  * All changes to the interleave configuration occur with this lock held
33  * for write.
34  */
35 DECLARE_RWSEM(cxl_region_rwsem);
36 
37 static DEFINE_IDA(cxl_port_ida);
38 static DEFINE_XARRAY(cxl_root_buses);
39 
40 int cxl_num_decoders_committed(struct cxl_port *port)
41 {
42 	lockdep_assert_held(&cxl_region_rwsem);
43 
44 	return port->commit_end + 1;
45 }
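/*
 * Usage sketch (illustrative): cxl_num_decoders_committed() asserts that
 * cxl_region_rwsem is held, so a caller that does not already hold it
 * would do something like:
 *
 *	guard(rwsem_read)(&cxl_region_rwsem);
 *	committed = cxl_num_decoders_committed(port);
 */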
46 
47 static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
48 			    char *buf)
49 {
50 	return sysfs_emit(buf, "%s\n", dev->type->name);
51 }
52 static DEVICE_ATTR_RO(devtype);
53 
54 static int cxl_device_id(const struct device *dev)
55 {
56 	if (dev->type == &cxl_nvdimm_bridge_type)
57 		return CXL_DEVICE_NVDIMM_BRIDGE;
58 	if (dev->type == &cxl_nvdimm_type)
59 		return CXL_DEVICE_NVDIMM;
60 	if (dev->type == CXL_PMEM_REGION_TYPE())
61 		return CXL_DEVICE_PMEM_REGION;
62 	if (dev->type == CXL_DAX_REGION_TYPE())
63 		return CXL_DEVICE_DAX_REGION;
64 	if (is_cxl_port(dev)) {
65 		if (is_cxl_root(to_cxl_port(dev)))
66 			return CXL_DEVICE_ROOT;
67 		return CXL_DEVICE_PORT;
68 	}
69 	if (is_cxl_memdev(dev))
70 		return CXL_DEVICE_MEMORY_EXPANDER;
71 	if (dev->type == CXL_REGION_TYPE())
72 		return CXL_DEVICE_REGION;
73 	if (dev->type == &cxl_pmu_type)
74 		return CXL_DEVICE_PMU;
75 	return 0;
76 }
77 
78 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
79 			     char *buf)
80 {
81 	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
82 }
83 static DEVICE_ATTR_RO(modalias);
84 
85 static struct attribute *cxl_base_attributes[] = {
86 	&dev_attr_devtype.attr,
87 	&dev_attr_modalias.attr,
88 	NULL,
89 };
90 
91 struct attribute_group cxl_base_attribute_group = {
92 	.attrs = cxl_base_attributes,
93 };
94 
95 static ssize_t start_show(struct device *dev, struct device_attribute *attr,
96 			  char *buf)
97 {
98 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
99 
100 	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
101 }
102 static DEVICE_ATTR_ADMIN_RO(start);
103 
104 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
105 			char *buf)
106 {
107 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
108 
109 	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
110 }
111 static DEVICE_ATTR_RO(size);
112 
113 #define CXL_DECODER_FLAG_ATTR(name, flag)                            \
114 static ssize_t name##_show(struct device *dev,                       \
115 			   struct device_attribute *attr, char *buf) \
116 {                                                                    \
117 	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
118                                                                      \
119 	return sysfs_emit(buf, "%s\n",                               \
120 			  (cxld->flags & (flag)) ? "1" : "0");       \
121 }                                                                    \
122 static DEVICE_ATTR_RO(name)
123 
124 CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
125 CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
126 CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
127 CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
128 CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
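/*
 * For reference, CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM) above
 * expands to roughly:
 *
 *	static ssize_t cap_pmem_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct cxl_decoder *cxld = to_cxl_decoder(dev);
 *
 *		return sysfs_emit(buf, "%s\n",
 *				  (cxld->flags & CXL_DECODER_F_PMEM) ? "1" : "0");
 *	}
 *	static DEVICE_ATTR_RO(cap_pmem);
 */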
129 
130 static ssize_t target_type_show(struct device *dev,
131 				struct device_attribute *attr, char *buf)
132 {
133 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
134 
135 	switch (cxld->target_type) {
136 	case CXL_DECODER_DEVMEM:
137 		return sysfs_emit(buf, "accelerator\n");
138 	case CXL_DECODER_HOSTONLYMEM:
139 		return sysfs_emit(buf, "expander\n");
140 	}
141 	return -ENXIO;
142 }
143 static DEVICE_ATTR_RO(target_type);
144 
145 static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
146 {
147 	struct cxl_decoder *cxld = &cxlsd->cxld;
148 	ssize_t offset = 0;
149 	int i, rc = 0;
150 
151 	for (i = 0; i < cxld->interleave_ways; i++) {
152 		struct cxl_dport *dport = cxlsd->target[i];
153 		struct cxl_dport *next = NULL;
154 
155 		if (!dport)
156 			break;
157 
158 		if (i + 1 < cxld->interleave_ways)
159 			next = cxlsd->target[i + 1];
160 		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
161 				   next ? "," : "");
162 		if (rc < 0)
163 			return rc;
164 		offset += rc;
165 	}
166 
167 	return offset;
168 }
169 
170 static ssize_t target_list_show(struct device *dev,
171 				struct device_attribute *attr, char *buf)
172 {
173 	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
174 	ssize_t offset;
175 	unsigned int seq;
176 	int rc;
177 
178 	do {
179 		seq = read_seqbegin(&cxlsd->target_lock);
180 		rc = emit_target_list(cxlsd, buf);
181 	} while (read_seqretry(&cxlsd->target_lock, seq));
182 
183 	if (rc < 0)
184 		return rc;
185 	offset = rc;
186 
187 	rc = sysfs_emit_at(buf, offset, "\n");
188 	if (rc < 0)
189 		return rc;
190 
191 	return offset + rc;
192 }
193 static DEVICE_ATTR_RO(target_list);
194 
195 static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
196 			 char *buf)
197 {
198 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
199 
200 	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
201 }
202 
203 static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
204 			  const char *buf, size_t len)
205 {
206 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
207 	enum cxl_decoder_mode mode;
208 	ssize_t rc;
209 
210 	if (sysfs_streq(buf, "pmem"))
211 		mode = CXL_DECODER_PMEM;
212 	else if (sysfs_streq(buf, "ram"))
213 		mode = CXL_DECODER_RAM;
214 	else
215 		return -EINVAL;
216 
217 	rc = cxl_dpa_set_mode(cxled, mode);
218 	if (rc)
219 		return rc;
220 
221 	return len;
222 }
223 static DEVICE_ATTR_RW(mode);
224 
225 static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
226 			    char *buf)
227 {
228 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
229 
230 	guard(rwsem_read)(&cxl_dpa_rwsem);
231 	return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
232 }
233 static DEVICE_ATTR_RO(dpa_resource);
234 
235 static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
236 			     char *buf)
237 {
238 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
239 	resource_size_t size = cxl_dpa_size(cxled);
240 
241 	return sysfs_emit(buf, "%pa\n", &size);
242 }
243 
244 static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
245 			      const char *buf, size_t len)
246 {
247 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
248 	unsigned long long size;
249 	ssize_t rc;
250 
251 	rc = kstrtoull(buf, 0, &size);
252 	if (rc)
253 		return rc;
254 
255 	if (!IS_ALIGNED(size, SZ_256M))
256 		return -EINVAL;
257 
258 	rc = cxl_dpa_free(cxled);
259 	if (rc)
260 		return rc;
261 
262 	if (size == 0)
263 		return len;
264 
265 	rc = cxl_dpa_alloc(cxled, size);
266 	if (rc)
267 		return rc;
268 
269 	return len;
270 }
271 static DEVICE_ATTR_RW(dpa_size);
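/*
 * Illustrative sysfs usage for the attributes above (decoder names vary by
 * system): write "ram" or "pmem" to /sys/bus/cxl/devices/decoderX.Y/mode,
 * then write a 256M-aligned byte count (e.g. 0x10000000) to .../dpa_size to
 * reserve device physical address capacity; writing 0 releases any existing
 * allocation.
 */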
272 
273 static ssize_t interleave_granularity_show(struct device *dev,
274 					   struct device_attribute *attr,
275 					   char *buf)
276 {
277 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
278 
279 	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
280 }
281 
282 static DEVICE_ATTR_RO(interleave_granularity);
283 
284 static ssize_t interleave_ways_show(struct device *dev,
285 				    struct device_attribute *attr, char *buf)
286 {
287 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
288 
289 	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
290 }
291 
292 static DEVICE_ATTR_RO(interleave_ways);
293 
294 static struct attribute *cxl_decoder_base_attrs[] = {
295 	&dev_attr_start.attr,
296 	&dev_attr_size.attr,
297 	&dev_attr_locked.attr,
298 	&dev_attr_interleave_granularity.attr,
299 	&dev_attr_interleave_ways.attr,
300 	NULL,
301 };
302 
303 static struct attribute_group cxl_decoder_base_attribute_group = {
304 	.attrs = cxl_decoder_base_attrs,
305 };
306 
307 static struct attribute *cxl_decoder_root_attrs[] = {
308 	&dev_attr_cap_pmem.attr,
309 	&dev_attr_cap_ram.attr,
310 	&dev_attr_cap_type2.attr,
311 	&dev_attr_cap_type3.attr,
312 	&dev_attr_target_list.attr,
313 	SET_CXL_REGION_ATTR(create_pmem_region)
314 	SET_CXL_REGION_ATTR(create_ram_region)
315 	SET_CXL_REGION_ATTR(delete_region)
316 	NULL,
317 };
318 
319 static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
320 {
321 	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;
322 
323 	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
324 }
325 
326 static bool can_create_ram(struct cxl_root_decoder *cxlrd)
327 {
328 	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;
329 
330 	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
331 }
332 
333 static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
334 {
335 	struct device *dev = kobj_to_dev(kobj);
336 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
337 
338 	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
339 		return 0;
340 
341 	if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
342 		return 0;
343 
344 	if (a == CXL_REGION_ATTR(delete_region) &&
345 	    !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
346 		return 0;
347 
348 	return a->mode;
349 }
350 
351 static struct attribute_group cxl_decoder_root_attribute_group = {
352 	.attrs = cxl_decoder_root_attrs,
353 	.is_visible = cxl_root_decoder_visible,
354 };
355 
356 static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
357 	&cxl_decoder_root_attribute_group,
358 	&cxl_decoder_base_attribute_group,
359 	&cxl_base_attribute_group,
360 	NULL,
361 };
362 
363 static struct attribute *cxl_decoder_switch_attrs[] = {
364 	&dev_attr_target_type.attr,
365 	&dev_attr_target_list.attr,
366 	SET_CXL_REGION_ATTR(region)
367 	NULL,
368 };
369 
370 static struct attribute_group cxl_decoder_switch_attribute_group = {
371 	.attrs = cxl_decoder_switch_attrs,
372 };
373 
374 static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
375 	&cxl_decoder_switch_attribute_group,
376 	&cxl_decoder_base_attribute_group,
377 	&cxl_base_attribute_group,
378 	NULL,
379 };
380 
381 static struct attribute *cxl_decoder_endpoint_attrs[] = {
382 	&dev_attr_target_type.attr,
383 	&dev_attr_mode.attr,
384 	&dev_attr_dpa_size.attr,
385 	&dev_attr_dpa_resource.attr,
386 	SET_CXL_REGION_ATTR(region)
387 	NULL,
388 };
389 
390 static struct attribute_group cxl_decoder_endpoint_attribute_group = {
391 	.attrs = cxl_decoder_endpoint_attrs,
392 };
393 
394 static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
395 	&cxl_decoder_base_attribute_group,
396 	&cxl_decoder_endpoint_attribute_group,
397 	&cxl_base_attribute_group,
398 	NULL,
399 };
400 
401 static void __cxl_decoder_release(struct cxl_decoder *cxld)
402 {
403 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
404 
405 	ida_free(&port->decoder_ida, cxld->id);
406 	put_device(&port->dev);
407 }
408 
409 static void cxl_endpoint_decoder_release(struct device *dev)
410 {
411 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
412 
413 	__cxl_decoder_release(&cxled->cxld);
414 	kfree(cxled);
415 }
416 
417 static void cxl_switch_decoder_release(struct device *dev)
418 {
419 	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
420 
421 	__cxl_decoder_release(&cxlsd->cxld);
422 	kfree(cxlsd);
423 }
424 
425 struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
426 {
427 	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
428 			  "not a cxl_root_decoder device\n"))
429 		return NULL;
430 	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
431 }
432 EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
433 
434 static void cxl_root_decoder_release(struct device *dev)
435 {
436 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
437 
438 	if (atomic_read(&cxlrd->region_id) >= 0)
439 		memregion_free(atomic_read(&cxlrd->region_id));
440 	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
441 	kfree(cxlrd);
442 }
443 
444 static const struct device_type cxl_decoder_endpoint_type = {
445 	.name = "cxl_decoder_endpoint",
446 	.release = cxl_endpoint_decoder_release,
447 	.groups = cxl_decoder_endpoint_attribute_groups,
448 };
449 
450 static const struct device_type cxl_decoder_switch_type = {
451 	.name = "cxl_decoder_switch",
452 	.release = cxl_switch_decoder_release,
453 	.groups = cxl_decoder_switch_attribute_groups,
454 };
455 
456 static const struct device_type cxl_decoder_root_type = {
457 	.name = "cxl_decoder_root",
458 	.release = cxl_root_decoder_release,
459 	.groups = cxl_decoder_root_attribute_groups,
460 };
461 
462 bool is_endpoint_decoder(struct device *dev)
463 {
464 	return dev->type == &cxl_decoder_endpoint_type;
465 }
466 EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);
467 
468 bool is_root_decoder(struct device *dev)
469 {
470 	return dev->type == &cxl_decoder_root_type;
471 }
472 EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
473 
474 bool is_switch_decoder(struct device *dev)
475 {
476 	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
477 }
478 EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);
479 
480 struct cxl_decoder *to_cxl_decoder(struct device *dev)
481 {
482 	if (dev_WARN_ONCE(dev,
483 			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
484 			  "not a cxl_decoder device\n"))
485 		return NULL;
486 	return container_of(dev, struct cxl_decoder, dev);
487 }
488 EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
489 
490 struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
491 {
492 	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
493 			  "not a cxl_endpoint_decoder device\n"))
494 		return NULL;
495 	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
496 }
497 EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
498 
499 struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
500 {
501 	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
502 			  "not a cxl_switch_decoder device\n"))
503 		return NULL;
504 	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
505 }
506 EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
507 
508 static void cxl_ep_release(struct cxl_ep *ep)
509 {
510 	put_device(ep->ep);
511 	kfree(ep);
512 }
513 
514 static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
515 {
516 	if (!ep)
517 		return;
518 	xa_erase(&port->endpoints, (unsigned long) ep->ep);
519 	cxl_ep_release(ep);
520 }
521 
522 static void cxl_port_release(struct device *dev)
523 {
524 	struct cxl_port *port = to_cxl_port(dev);
525 	unsigned long index;
526 	struct cxl_ep *ep;
527 
528 	xa_for_each(&port->endpoints, index, ep)
529 		cxl_ep_remove(port, ep);
530 	xa_destroy(&port->endpoints);
531 	xa_destroy(&port->dports);
532 	xa_destroy(&port->regions);
533 	ida_free(&cxl_port_ida, port->id);
534 	kfree(port);
535 }
536 
537 static const struct attribute_group *cxl_port_attribute_groups[] = {
538 	&cxl_base_attribute_group,
539 	NULL,
540 };
541 
542 static const struct device_type cxl_port_type = {
543 	.name = "cxl_port",
544 	.release = cxl_port_release,
545 	.groups = cxl_port_attribute_groups,
546 };
547 
548 bool is_cxl_port(const struct device *dev)
549 {
550 	return dev->type == &cxl_port_type;
551 }
552 EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
553 
554 struct cxl_port *to_cxl_port(const struct device *dev)
555 {
556 	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
557 			  "not a cxl_port device\n"))
558 		return NULL;
559 	return container_of(dev, struct cxl_port, dev);
560 }
561 EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
562 
563 static void unregister_port(void *_port)
564 {
565 	struct cxl_port *port = _port;
566 	struct cxl_port *parent;
567 	struct device *lock_dev;
568 
569 	if (is_cxl_root(port))
570 		parent = NULL;
571 	else
572 		parent = to_cxl_port(port->dev.parent);
573 
574 	/*
575 	 * The CXL root port and the first level of ports are unregistered
576 	 * under the platform firmware device lock; all other ports are
577 	 * unregistered while holding their parent port lock.
578 	 */
579 	if (!parent)
580 		lock_dev = port->uport_dev;
581 	else if (is_cxl_root(parent))
582 		lock_dev = parent->uport_dev;
583 	else
584 		lock_dev = &parent->dev;
585 
586 	device_lock_assert(lock_dev);
587 	port->dead = true;
588 	device_unregister(&port->dev);
589 }
590 
591 static void cxl_unlink_uport(void *_port)
592 {
593 	struct cxl_port *port = _port;
594 
595 	sysfs_remove_link(&port->dev.kobj, "uport");
596 }
597 
598 static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
599 {
600 	int rc;
601 
602 	rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj,
603 			       "uport");
604 	if (rc)
605 		return rc;
606 	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
607 }
608 
609 static void cxl_unlink_parent_dport(void *_port)
610 {
611 	struct cxl_port *port = _port;
612 
613 	sysfs_remove_link(&port->dev.kobj, "parent_dport");
614 }
615 
616 static int devm_cxl_link_parent_dport(struct device *host,
617 				      struct cxl_port *port,
618 				      struct cxl_dport *parent_dport)
619 {
620 	int rc;
621 
622 	if (!parent_dport)
623 		return 0;
624 
625 	rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj,
626 			       "parent_dport");
627 	if (rc)
628 		return rc;
629 	return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
630 }
631 
632 static struct lock_class_key cxl_port_key;
633 
634 static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
635 				       resource_size_t component_reg_phys,
636 				       struct cxl_dport *parent_dport)
637 {
638 	struct cxl_port *port;
639 	struct device *dev;
640 	int rc;
641 
642 	port = kzalloc(sizeof(*port), GFP_KERNEL);
643 	if (!port)
644 		return ERR_PTR(-ENOMEM);
645 
646 	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
647 	if (rc < 0)
648 		goto err;
649 	port->id = rc;
650 	port->uport_dev = uport_dev;
651 
652 	/*
653 	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
654 	 * its parent and it does not have any corresponding component
655 	 * registers as its decode is described by a fixed platform
656 	 * description.
657 	 */
658 	dev = &port->dev;
659 	if (parent_dport) {
660 		struct cxl_port *parent_port = parent_dport->port;
661 		struct cxl_port *iter;
662 
663 		dev->parent = &parent_port->dev;
664 		port->depth = parent_port->depth + 1;
665 		port->parent_dport = parent_dport;
666 
667 		/*
668 		 * walk to the host bridge, or the first ancestor that knows
669 		 * the host bridge
670 		 */
671 		iter = port;
672 		while (!iter->host_bridge &&
673 		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
674 			iter = to_cxl_port(iter->dev.parent);
675 		if (iter->host_bridge)
676 			port->host_bridge = iter->host_bridge;
677 		else if (parent_dport->rch)
678 			port->host_bridge = parent_dport->dport_dev;
679 		else
680 			port->host_bridge = iter->uport_dev;
681 		dev_dbg(uport_dev, "host-bridge: %s\n",
682 			dev_name(port->host_bridge));
683 	} else
684 		dev->parent = uport_dev;
685 
686 	port->component_reg_phys = component_reg_phys;
687 	ida_init(&port->decoder_ida);
688 	port->hdm_end = -1;
689 	port->commit_end = -1;
690 	xa_init(&port->dports);
691 	xa_init(&port->endpoints);
692 	xa_init(&port->regions);
693 
694 	device_initialize(dev);
695 	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
696 	device_set_pm_not_required(dev);
697 	dev->bus = &cxl_bus_type;
698 	dev->type = &cxl_port_type;
699 
700 	return port;
701 
702 err:
703 	kfree(port);
704 	return ERR_PTR(rc);
705 }
706 
707 static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
708 			       resource_size_t component_reg_phys)
709 {
710 	if (component_reg_phys == CXL_RESOURCE_NONE)
711 		return 0;
712 
713 	*map = (struct cxl_register_map) {
714 		.host = host,
715 		.reg_type = CXL_REGLOC_RBI_COMPONENT,
716 		.resource = component_reg_phys,
717 		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
718 	};
719 
720 	return cxl_setup_regs(map);
721 }
722 
723 static int cxl_port_setup_regs(struct cxl_port *port,
724 			resource_size_t component_reg_phys)
725 {
726 	if (dev_is_platform(port->uport_dev))
727 		return 0;
728 	return cxl_setup_comp_regs(&port->dev, &port->comp_map,
729 				   component_reg_phys);
730 }
731 
732 static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
733 				resource_size_t component_reg_phys)
734 {
735 	int rc;
736 
737 	if (dev_is_platform(dport->dport_dev))
738 		return 0;
739 
740 	/*
741 	 * Use @dport->dport_dev as the context for error messages during
742 	 * register probing, and fix up @host after the fact, since @host may be
743 	 * NULL.
744 	 */
745 	rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
746 				 component_reg_phys);
747 	dport->comp_map.host = host;
748 	return rc;
749 }
750 
751 static struct cxl_port *__devm_cxl_add_port(struct device *host,
752 					    struct device *uport_dev,
753 					    resource_size_t component_reg_phys,
754 					    struct cxl_dport *parent_dport)
755 {
756 	struct cxl_port *port;
757 	struct device *dev;
758 	int rc;
759 
760 	port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
761 	if (IS_ERR(port))
762 		return port;
763 
764 	dev = &port->dev;
765 	if (is_cxl_memdev(uport_dev))
766 		rc = dev_set_name(dev, "endpoint%d", port->id);
767 	else if (parent_dport)
768 		rc = dev_set_name(dev, "port%d", port->id);
769 	else
770 		rc = dev_set_name(dev, "root%d", port->id);
771 	if (rc)
772 		goto err;
773 
774 	rc = cxl_port_setup_regs(port, component_reg_phys);
775 	if (rc)
776 		goto err;
777 
778 	rc = device_add(dev);
779 	if (rc)
780 		goto err;
781 
782 	rc = devm_add_action_or_reset(host, unregister_port, port);
783 	if (rc)
784 		return ERR_PTR(rc);
785 
786 	rc = devm_cxl_link_uport(host, port);
787 	if (rc)
788 		return ERR_PTR(rc);
789 
790 	rc = devm_cxl_link_parent_dport(host, port, parent_dport);
791 	if (rc)
792 		return ERR_PTR(rc);
793 
794 	return port;
795 
796 err:
797 	put_device(dev);
798 	return ERR_PTR(rc);
799 }
800 
801 /**
802  * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
803  * @host: host device for devm operations
804  * @uport_dev: "physical" device implementing this upstream port
805  * @component_reg_phys: (optional) for configurable cxl_port instances
806  * @parent_dport: next hop up in the CXL memory decode hierarchy
807  */
808 struct cxl_port *devm_cxl_add_port(struct device *host,
809 				   struct device *uport_dev,
810 				   resource_size_t component_reg_phys,
811 				   struct cxl_dport *parent_dport)
812 {
813 	struct cxl_port *port, *parent_port;
814 
815 	port = __devm_cxl_add_port(host, uport_dev, component_reg_phys,
816 				   parent_dport);
817 
818 	parent_port = parent_dport ? parent_dport->port : NULL;
819 	if (IS_ERR(port)) {
820 		dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n",
821 			parent_port ? " port to " : "",
822 			parent_port ? dev_name(&parent_port->dev) : "",
823 			parent_port ? "" : " root port",
824 			PTR_ERR(port));
825 	} else {
826 		dev_dbg(uport_dev, "%s added%s%s%s\n",
827 			dev_name(&port->dev),
828 			parent_port ? " to " : "",
829 			parent_port ? dev_name(&parent_port->dev) : "",
830 			parent_port ? "" : " (root port)");
831 	}
832 
833 	return port;
834 }
835 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
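/*
 * Minimal usage sketch (hypothetical platform-driver context, error handling
 * elided): a CXL root is added with no parent dport, and a descendant port
 * is added relative to a previously registered dport:
 *
 *	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	port = devm_cxl_add_port(host, uport_dev, component_reg_phys, dport);
 */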
836 
837 struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
838 {
839 	/* There is no pci_bus associated with a CXL platform-root port */
840 	if (is_cxl_root(port))
841 		return NULL;
842 
843 	if (dev_is_pci(port->uport_dev)) {
844 		struct pci_dev *pdev = to_pci_dev(port->uport_dev);
845 
846 		return pdev->subordinate;
847 	}
848 
849 	return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
850 }
851 EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
852 
853 static void unregister_pci_bus(void *uport_dev)
854 {
855 	xa_erase(&cxl_root_buses, (unsigned long)uport_dev);
856 }
857 
858 int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
859 			      struct pci_bus *bus)
860 {
861 	int rc;
862 
863 	if (dev_is_pci(uport_dev))
864 		return -EINVAL;
865 
866 	rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus,
867 		       GFP_KERNEL);
868 	if (rc)
869 		return rc;
870 	return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
871 }
872 EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
873 
874 static bool dev_is_cxl_root_child(struct device *dev)
875 {
876 	struct cxl_port *port, *parent;
877 
878 	if (!is_cxl_port(dev))
879 		return false;
880 
881 	port = to_cxl_port(dev);
882 	if (is_cxl_root(port))
883 		return false;
884 
885 	parent = to_cxl_port(port->dev.parent);
886 	if (is_cxl_root(parent))
887 		return true;
888 
889 	return false;
890 }
891 
892 struct cxl_port *find_cxl_root(struct cxl_port *port)
893 {
894 	struct cxl_port *iter = port;
895 
896 	while (iter && !is_cxl_root(iter))
897 		iter = to_cxl_port(iter->dev.parent);
898 
899 	if (!iter)
900 		return NULL;
901 	get_device(&iter->dev);
902 	return iter;
903 }
904 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
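/*
 * find_cxl_root() takes a reference on the returned port; a sketch of the
 * expected calling pattern (assuming the caller only needs the root
 * temporarily):
 *
 *	struct cxl_port *root = find_cxl_root(port);
 *
 *	if (root) {
 *		...
 *		put_device(&root->dev);
 *	}
 */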
905 
906 static struct cxl_dport *find_dport(struct cxl_port *port, int id)
907 {
908 	struct cxl_dport *dport;
909 	unsigned long index;
910 
911 	device_lock_assert(&port->dev);
912 	xa_for_each(&port->dports, index, dport)
913 		if (dport->port_id == id)
914 			return dport;
915 	return NULL;
916 }
917 
918 static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
919 {
920 	struct cxl_dport *dup;
921 	int rc;
922 
923 	device_lock_assert(&port->dev);
924 	dup = find_dport(port, dport->port_id);
925 	if (dup) {
926 		dev_err(&port->dev,
927 			"unable to add dport%d-%s non-unique port id (%s)\n",
928 			dport->port_id, dev_name(dport->dport_dev),
929 			dev_name(dup->dport_dev));
930 		return -EBUSY;
931 	}
932 
933 	rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport,
934 		       GFP_KERNEL);
935 	if (rc)
936 		return rc;
937 
938 	port->nr_dports++;
939 	return 0;
940 }
941 
942 /*
943  * Since root-level CXL dports cannot be enumerated by PCI, they are not
944  * enumerated by the common port driver that acquires the port lock over
945  * dport add/remove. Instead, root dports are manually added by a
946  * platform driver, and cond_cxl_root_lock() is used to take the missing
947  * port lock in that case.
948  */
949 static void cond_cxl_root_lock(struct cxl_port *port)
950 {
951 	if (is_cxl_root(port))
952 		device_lock(&port->dev);
953 }
954 
955 static void cond_cxl_root_unlock(struct cxl_port *port)
956 {
957 	if (is_cxl_root(port))
958 		device_unlock(&port->dev);
959 }
960 
961 static void cxl_dport_remove(void *data)
962 {
963 	struct cxl_dport *dport = data;
964 	struct cxl_port *port = dport->port;
965 
966 	xa_erase(&port->dports, (unsigned long) dport->dport_dev);
967 	put_device(dport->dport_dev);
968 }
969 
970 static void cxl_dport_unlink(void *data)
971 {
972 	struct cxl_dport *dport = data;
973 	struct cxl_port *port = dport->port;
974 	char link_name[CXL_TARGET_STRLEN];
975 
976 	sprintf(link_name, "dport%d", dport->port_id);
977 	sysfs_remove_link(&port->dev.kobj, link_name);
978 }
979 
980 static struct cxl_dport *
981 __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
982 		     int port_id, resource_size_t component_reg_phys,
983 		     resource_size_t rcrb)
984 {
985 	char link_name[CXL_TARGET_STRLEN];
986 	struct cxl_dport *dport;
987 	struct device *host;
988 	int rc;
989 
990 	if (is_cxl_root(port))
991 		host = port->uport_dev;
992 	else
993 		host = &port->dev;
994 
995 	if (!host->driver) {
996 		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
997 			      dev_name(dport_dev));
998 		return ERR_PTR(-ENXIO);
999 	}
1000 
1001 	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
1002 	    CXL_TARGET_STRLEN)
1003 		return ERR_PTR(-EINVAL);
1004 
1005 	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
1006 	if (!dport)
1007 		return ERR_PTR(-ENOMEM);
1008 
1009 	dport->dport_dev = dport_dev;
1010 	dport->port_id = port_id;
1011 	dport->port = port;
1012 
1013 	if (rcrb == CXL_RESOURCE_NONE) {
1014 		rc = cxl_dport_setup_regs(&port->dev, dport,
1015 					  component_reg_phys);
1016 		if (rc)
1017 			return ERR_PTR(rc);
1018 	} else {
1019 		dport->rcrb.base = rcrb;
1020 		component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
1021 							 CXL_RCRB_DOWNSTREAM);
1022 		if (component_reg_phys == CXL_RESOURCE_NONE) {
1023 			dev_warn(dport_dev, "Invalid Component Registers in RCRB");
1024 			return ERR_PTR(-ENXIO);
1025 		}
1026 
1027 		/*
1028 		 * RCH @dport is not ready to map until associated with its
1029 		 * memdev
1030 		 */
1031 		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
1032 		if (rc)
1033 			return ERR_PTR(rc);
1034 
1035 		dport->rch = true;
1036 	}
1037 
1038 	if (component_reg_phys != CXL_RESOURCE_NONE)
1039 		dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
1040 			&component_reg_phys);
1041 
1042 	cond_cxl_root_lock(port);
1043 	rc = add_dport(port, dport);
1044 	cond_cxl_root_unlock(port);
1045 	if (rc)
1046 		return ERR_PTR(rc);
1047 
1048 	get_device(dport_dev);
1049 	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
1050 	if (rc)
1051 		return ERR_PTR(rc);
1052 
1053 	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
1054 	if (rc)
1055 		return ERR_PTR(rc);
1056 
1057 	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
1058 	if (rc)
1059 		return ERR_PTR(rc);
1060 
1061 	return dport;
1062 }
1063 
1064 /**
1065  * devm_cxl_add_dport - append VH downstream port data to a cxl_port
1066  * @port: the cxl_port that references this dport
1067  * @dport_dev: firmware or PCI device representing the dport
1068  * @port_id: identifier for this dport in a decoder's target list
1069  * @component_reg_phys: optional location of CXL component registers
1070  *
1071  * Note that dports are appended to the devm release actions of either
1072  * the port's host (for root ports) or the port itself (for
1073  * switch ports).
1074  */
1075 struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
1076 				     struct device *dport_dev, int port_id,
1077 				     resource_size_t component_reg_phys)
1078 {
1079 	struct cxl_dport *dport;
1080 
1081 	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
1082 				     component_reg_phys, CXL_RESOURCE_NONE);
1083 	if (IS_ERR(dport)) {
1084 		dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
1085 			dev_name(&port->dev), PTR_ERR(dport));
1086 	} else {
1087 		dev_dbg(dport_dev, "dport added to %s\n",
1088 			dev_name(&port->dev));
1089 	}
1090 
1091 	return dport;
1092 }
1093 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
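/*
 * Usage sketch (hypothetical values): a host-bridge or switch driver
 * enumerating a downstream PCIe port might register it with the port id it
 * advertises in decoder target lists:
 *
 *	dport = devm_cxl_add_dport(port, &pdev->dev, port_num, creg_base);
 *	if (IS_ERR(dport))
 *		return PTR_ERR(dport);
 */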
1094 
1095 /**
1096  * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
1097  * @port: the cxl_port that references this dport
1098  * @dport_dev: firmware or PCI device representing the dport
1099  * @port_id: identifier for this dport in a decoder's target list
1100  * @rcrb: mandatory location of a Root Complex Register Block
1101  *
1102  * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
1103  */
1104 struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
1105 					 struct device *dport_dev, int port_id,
1106 					 resource_size_t rcrb)
1107 {
1108 	struct cxl_dport *dport;
1109 
1110 	if (rcrb == CXL_RESOURCE_NONE) {
1111 		dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
1112 		return ERR_PTR(-EINVAL);
1113 	}
1114 
1115 	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
1116 				     CXL_RESOURCE_NONE, rcrb);
1117 	if (IS_ERR(dport)) {
1118 		dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
1119 			dev_name(&port->dev), PTR_ERR(dport));
1120 	} else {
1121 		dev_dbg(dport_dev, "RCH dport added to %s\n",
1122 			dev_name(&port->dev));
1123 	}
1124 
1125 	return dport;
1126 }
1127 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
1128 
1129 static int add_ep(struct cxl_ep *new)
1130 {
1131 	struct cxl_port *port = new->dport->port;
1132 	int rc;
1133 
1134 	device_lock(&port->dev);
1135 	if (port->dead) {
1136 		device_unlock(&port->dev);
1137 		return -ENXIO;
1138 	}
1139 	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
1140 		       GFP_KERNEL);
1141 	device_unlock(&port->dev);
1142 
1143 	return rc;
1144 }
1145 
1146 /**
1147  * cxl_add_ep - register an endpoint's interest in a port
1148  * @dport: the dport that routes to @ep_dev
1149  * @ep_dev: device representing the endpoint
1150  *
1151  * Intermediate CXL ports are scanned based on the arrival of endpoints.
1152  * When those endpoints depart, the port can be destroyed once all
1153  * endpoints that care about that port have been removed.
1154  */
1155 static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
1156 {
1157 	struct cxl_ep *ep;
1158 	int rc;
1159 
1160 	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
1161 	if (!ep)
1162 		return -ENOMEM;
1163 
1164 	ep->ep = get_device(ep_dev);
1165 	ep->dport = dport;
1166 
1167 	rc = add_ep(ep);
1168 	if (rc)
1169 		cxl_ep_release(ep);
1170 	return rc;
1171 }
1172 
1173 struct cxl_find_port_ctx {
1174 	const struct device *dport_dev;
1175 	const struct cxl_port *parent_port;
1176 	struct cxl_dport **dport;
1177 };
1178 
1179 static int match_port_by_dport(struct device *dev, const void *data)
1180 {
1181 	const struct cxl_find_port_ctx *ctx = data;
1182 	struct cxl_dport *dport;
1183 	struct cxl_port *port;
1184 
1185 	if (!is_cxl_port(dev))
1186 		return 0;
1187 	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
1188 		return 0;
1189 
1190 	port = to_cxl_port(dev);
1191 	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
1192 	if (ctx->dport)
1193 		*ctx->dport = dport;
1194 	return dport != NULL;
1195 }
1196 
1197 static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
1198 {
1199 	struct device *dev;
1200 
1201 	if (!ctx->dport_dev)
1202 		return NULL;
1203 
1204 	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
1205 	if (dev)
1206 		return to_cxl_port(dev);
1207 	return NULL;
1208 }
1209 
1210 static struct cxl_port *find_cxl_port(struct device *dport_dev,
1211 				      struct cxl_dport **dport)
1212 {
1213 	struct cxl_find_port_ctx ctx = {
1214 		.dport_dev = dport_dev,
1215 		.dport = dport,
1216 	};
1217 	struct cxl_port *port;
1218 
1219 	port = __find_cxl_port(&ctx);
1220 	return port;
1221 }
1222 
1223 static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
1224 					 struct device *dport_dev,
1225 					 struct cxl_dport **dport)
1226 {
1227 	struct cxl_find_port_ctx ctx = {
1228 		.dport_dev = dport_dev,
1229 		.parent_port = parent_port,
1230 		.dport = dport,
1231 	};
1232 	struct cxl_port *port;
1233 
1234 	port = __find_cxl_port(&ctx);
1235 	return port;
1236 }
1237 
1238 /*
1239  * All users of grandparent() are using it to walk PCIe-like switch port
1240  * hierarchy. A PCIe switch consists of a bridge device representing the
1241  * upstream switch port and N bridges representing downstream switch ports. When
1242  * bridges stack, the grandparent of a downstream switch port is another
1243  * downstream switch port in the immediate ancestor switch.
1244  */
1245 static struct device *grandparent(struct device *dev)
1246 {
1247 	if (dev && dev->parent)
1248 		return dev->parent->parent;
1249 	return NULL;
1250 }
1251 
1252 static struct device *endpoint_host(struct cxl_port *endpoint)
1253 {
1254 	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
1255 
1256 	if (is_cxl_root(port))
1257 		return port->uport_dev;
1258 	return &port->dev;
1259 }
1260 
1261 static void delete_endpoint(void *data)
1262 {
1263 	struct cxl_memdev *cxlmd = data;
1264 	struct cxl_port *endpoint = cxlmd->endpoint;
1265 	struct device *host = endpoint_host(endpoint);
1266 
1267 	device_lock(host);
1268 	if (host->driver && !endpoint->dead) {
1269 		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
1270 		devm_release_action(host, cxl_unlink_uport, endpoint);
1271 		devm_release_action(host, unregister_port, endpoint);
1272 	}
1273 	cxlmd->endpoint = NULL;
1274 	device_unlock(host);
1275 	put_device(&endpoint->dev);
1276 	put_device(host);
1277 }
1278 
1279 int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
1280 {
1281 	struct device *host = endpoint_host(endpoint);
1282 	struct device *dev = &cxlmd->dev;
1283 
1284 	get_device(host);
1285 	get_device(&endpoint->dev);
1286 	cxlmd->endpoint = endpoint;
1287 	cxlmd->depth = endpoint->depth;
1288 	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
1289 }
1290 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
1291 
1292 /*
1293  * The natural end of life of a non-root 'cxl_port' is when its parent port goes
1294  * through a ->remove() event ("top-down" unregistration). The unnatural trigger
1295  * for a port to be unregistered is when all memdevs beneath that port have gone
1296  * through ->remove(). This "bottom-up" removal selectively removes individual
1297  * child ports manually. This depends on devm_cxl_add_port() not changing
1298  * its devm action registration order, and on dports having already been
1299  * destroyed by reap_dports().
1300  */
1301 static void delete_switch_port(struct cxl_port *port)
1302 {
1303 	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
1304 	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
1305 	devm_release_action(port->dev.parent, unregister_port, port);
1306 }
1307 
1308 static void reap_dports(struct cxl_port *port)
1309 {
1310 	struct cxl_dport *dport;
1311 	unsigned long index;
1312 
1313 	device_lock_assert(&port->dev);
1314 
1315 	xa_for_each(&port->dports, index, dport) {
1316 		devm_release_action(&port->dev, cxl_dport_unlink, dport);
1317 		devm_release_action(&port->dev, cxl_dport_remove, dport);
1318 		devm_kfree(&port->dev, dport);
1319 	}
1320 }
1321 
1322 struct detach_ctx {
1323 	struct cxl_memdev *cxlmd;
1324 	int depth;
1325 };
1326 
1327 static int port_has_memdev(struct device *dev, const void *data)
1328 {
1329 	const struct detach_ctx *ctx = data;
1330 	struct cxl_port *port;
1331 
1332 	if (!is_cxl_port(dev))
1333 		return 0;
1334 
1335 	port = to_cxl_port(dev);
1336 	if (port->depth != ctx->depth)
1337 		return 0;
1338 
1339 	return !!cxl_ep_load(port, ctx->cxlmd);
1340 }
1341 
1342 static void cxl_detach_ep(void *data)
1343 {
1344 	struct cxl_memdev *cxlmd = data;
1345 
1346 	for (int i = cxlmd->depth - 1; i >= 1; i--) {
1347 		struct cxl_port *port, *parent_port;
1348 		struct detach_ctx ctx = {
1349 			.cxlmd = cxlmd,
1350 			.depth = i,
1351 		};
1352 		struct device *dev;
1353 		struct cxl_ep *ep;
1354 		bool died = false;
1355 
1356 		dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
1357 				      port_has_memdev);
1358 		if (!dev)
1359 			continue;
1360 		port = to_cxl_port(dev);
1361 
1362 		parent_port = to_cxl_port(port->dev.parent);
1363 		device_lock(&parent_port->dev);
1364 		device_lock(&port->dev);
1365 		ep = cxl_ep_load(port, cxlmd);
1366 		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
1367 			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
1368 		cxl_ep_remove(port, ep);
1369 		if (ep && !port->dead && xa_empty(&port->endpoints) &&
1370 		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
1371 			/*
1372 			 * This was the last ep attached to a dynamically
1373 			 * enumerated port. Block new cxl_add_ep() and garbage
1374 			 * collect the port.
1375 			 */
1376 			died = true;
1377 			port->dead = true;
1378 			reap_dports(port);
1379 		}
1380 		device_unlock(&port->dev);
1381 
1382 		if (died) {
1383 			dev_dbg(&cxlmd->dev, "delete %s\n",
1384 				dev_name(&port->dev));
1385 			delete_switch_port(port);
1386 		}
1387 		put_device(&port->dev);
1388 		device_unlock(&parent_port->dev);
1389 	}
1390 }
1391 
1392 static resource_size_t find_component_registers(struct device *dev)
1393 {
1394 	struct cxl_register_map map;
1395 	struct pci_dev *pdev;
1396 
1397 	/*
1398 	 * Theoretically, CXL component registers can be hosted on a
1399 	 * non-PCI device; in practice, only cxl_test hits this case.
1400 	 */
1401 	if (!dev_is_pci(dev))
1402 		return CXL_RESOURCE_NONE;
1403 
1404 	pdev = to_pci_dev(dev);
1405 
1406 	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
1407 	return map.resource;
1408 }
1409 
1410 static int add_port_attach_ep(struct cxl_memdev *cxlmd,
1411 			      struct device *uport_dev,
1412 			      struct device *dport_dev)
1413 {
1414 	struct device *dparent = grandparent(dport_dev);
1415 	struct cxl_port *port, *parent_port = NULL;
1416 	struct cxl_dport *dport, *parent_dport;
1417 	resource_size_t component_reg_phys;
1418 	int rc;
1419 
1420 	if (!dparent) {
1421 		/*
1422 		 * CXL-root 'cxl_port' on a previous iteration; fail for now to
1423 		 * be re-probed after the platform driver attaches.
1424 		 * be re-probed after platform driver attaches.
1425 		 */
1426 		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
1427 			dev_name(dport_dev));
1428 		return -ENXIO;
1429 	}
1430 
1431 	parent_port = find_cxl_port(dparent, &parent_dport);
1432 	if (!parent_port) {
1433 		/* iterate to create this parent_port */
1434 		return -EAGAIN;
1435 	}
1436 
1437 	device_lock(&parent_port->dev);
1438 	if (!parent_port->dev.driver) {
1439 		dev_warn(&cxlmd->dev,
1440 			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
1441 			 dev_name(&parent_port->dev), dev_name(uport_dev));
1442 		port = ERR_PTR(-ENXIO);
1443 		goto out;
1444 	}
1445 
1446 	port = find_cxl_port_at(parent_port, dport_dev, &dport);
1447 	if (!port) {
1448 		component_reg_phys = find_component_registers(uport_dev);
1449 		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
1450 					 component_reg_phys, parent_dport);
1451 		/* retry find to pick up the new dport information */
1452 		if (!IS_ERR(port))
1453 			port = find_cxl_port_at(parent_port, dport_dev, &dport);
1454 	}
1455 out:
1456 	device_unlock(&parent_port->dev);
1457 
1458 	if (IS_ERR(port))
1459 		rc = PTR_ERR(port);
1460 	else {
1461 		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
1462 			dev_name(&port->dev), dev_name(port->uport_dev));
1463 		rc = cxl_add_ep(dport, &cxlmd->dev);
1464 		if (rc == -EBUSY) {
1465 			/*
1466 			 * "can't" happen, but this error code means
1467 			 * something to the caller, so translate it.
1468 			 */
1469 			rc = -ENXIO;
1470 		}
1471 		put_device(&port->dev);
1472 	}
1473 
1474 	put_device(&parent_port->dev);
1475 	return rc;
1476 }
1477 
1478 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
1479 {
1480 	struct device *dev = &cxlmd->dev;
1481 	struct device *iter;
1482 	int rc;
1483 
1484 	/*
1485 	 * Skip intermediate port enumeration in the RCH case; there
1486 	 * are no ports in between a host bridge and an endpoint.
1487 	 */
1488 	if (cxlmd->cxlds->rcd)
1489 		return 0;
1490 
1491 	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
1492 	if (rc)
1493 		return rc;
1494 
1495 	/*
1496 	 * Scan for and add all cxl_ports in this device's ancestry.
1497 	 * Repeat until no more ports are added. Abort if a port add
1498 	 * attempt fails.
1499 	 */
1500 retry:
1501 	for (iter = dev; iter; iter = grandparent(iter)) {
1502 		struct device *dport_dev = grandparent(iter);
1503 		struct device *uport_dev;
1504 		struct cxl_dport *dport;
1505 		struct cxl_port *port;
1506 
1507 		if (!dport_dev)
1508 			return 0;
1509 
1510 		uport_dev = dport_dev->parent;
1511 		if (!uport_dev) {
1512 			dev_warn(dev, "at %s no parent for dport: %s\n",
1513 				 dev_name(iter), dev_name(dport_dev));
1514 			return -ENXIO;
1515 		}
1516 
1517 		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
1518 			dev_name(iter), dev_name(dport_dev),
1519 			dev_name(uport_dev));
1520 		port = find_cxl_port(dport_dev, &dport);
1521 		if (port) {
1522 			dev_dbg(&cxlmd->dev,
1523 				"found already registered port %s:%s\n",
1524 				dev_name(&port->dev),
1525 				dev_name(port->uport_dev));
1526 			rc = cxl_add_ep(dport, &cxlmd->dev);
1527 
1528 			/*
1529 			 * If the endpoint already exists in the port's list,
1530 			 * that's ok; it was added on a previous pass.
1531 			 * Otherwise, retry in add_port_attach_ep() after taking
1532 			 * the parent_port lock, as the current port may be in the
1533 			 * process of being reaped.
1534 			 */
1535 			if (rc && rc != -EBUSY) {
1536 				put_device(&port->dev);
1537 				return rc;
1538 			}
1539 
1540 			/* Any more ports to add between this one and the root? */
1541 			if (!dev_is_cxl_root_child(&port->dev)) {
1542 				put_device(&port->dev);
1543 				continue;
1544 			}
1545 
1546 			put_device(&port->dev);
1547 			return 0;
1548 		}
1549 
1550 		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
1551 		/* port missing, try to add parent */
1552 		if (rc == -EAGAIN)
1553 			continue;
1554 		/* failed to add ep or port */
1555 		if (rc)
1556 			return rc;
1557 		/* port added, new descendants possible, start over */
1558 		goto retry;
1559 	}
1560 
1561 	return 0;
1562 }
1563 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
1564 
1565 struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
1566 				   struct cxl_dport **dport)
1567 {
1568 	return find_cxl_port(pdev->dev.parent, dport);
1569 }
1570 EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);
1571 
1572 struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
1573 				   struct cxl_dport **dport)
1574 {
1575 	return find_cxl_port(grandparent(&cxlmd->dev), dport);
1576 }
1577 EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
1578 
1579 static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
1580 				    struct cxl_port *port, int *target_map)
1581 {
1582 	int i, rc = 0;
1583 
1584 	if (!target_map)
1585 		return 0;
1586 
1587 	device_lock_assert(&port->dev);
1588 
1589 	if (xa_empty(&port->dports))
1590 		return -EINVAL;
1591 
1592 	write_seqlock(&cxlsd->target_lock);
1593 	for (i = 0; i < cxlsd->nr_targets; i++) {
1594 		struct cxl_dport *dport = find_dport(port, target_map[i]);
1595 
1596 		if (!dport) {
1597 			rc = -ENXIO;
1598 			break;
1599 		}
1600 		cxlsd->target[i] = dport;
1601 	}
1602 	write_sequnlock(&cxlsd->target_lock);
1603 
1604 	return rc;
1605 }
1606 
1607 struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
1608 {
1609 	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
1610 	struct cxl_decoder *cxld = &cxlsd->cxld;
1611 	int iw;
1612 
1613 	iw = cxld->interleave_ways;
1614 	if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
1615 			  "misconfigured root decoder\n"))
1616 		return NULL;
1617 
1618 	return cxlrd->cxlsd.target[pos % iw];
1619 }
1620 EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL);
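/*
 * Worked example: with interleave_ways == 2 the modulo math above maps
 * region positions 0, 1, 2, 3 to cxlsd->target[0], target[1], target[0],
 * target[1], i.e. host bridges are selected round-robin by position.
 */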
1621 
1622 static struct lock_class_key cxl_decoder_key;
1623 
1624 /**
1625  * cxl_decoder_init - Common decoder setup / initialization
1626  * @port: owning port of this decoder
1627  * @cxld: common decoder properties to initialize
1628  *
1629  * A port may contain one or more decoders. Each of those decoders
1630  * enables some address space for CXL.mem utilization. A decoder is
1631  * expected to be configured by the caller before registering it via
1632  * cxl_decoder_add().
1633  */
1634 static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
1635 {
1636 	struct device *dev;
1637 	int rc;
1638 
1639 	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
1640 	if (rc < 0)
1641 		return rc;
1642 
1643 	/* need parent to stick around to release the id */
1644 	get_device(&port->dev);
1645 	cxld->id = rc;
1646 
1647 	dev = &cxld->dev;
1648 	device_initialize(dev);
1649 	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
1650 	device_set_pm_not_required(dev);
1651 	dev->parent = &port->dev;
1652 	dev->bus = &cxl_bus_type;
1653 
1654 	/* Pre initialize an "empty" decoder */
1655 	cxld->interleave_ways = 1;
1656 	cxld->interleave_granularity = PAGE_SIZE;
1657 	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
1658 	cxld->hpa_range = (struct range) {
1659 		.start = 0,
1660 		.end = -1,
1661 	};
1662 
1663 	return 0;
1664 }
1665 
1666 static int cxl_switch_decoder_init(struct cxl_port *port,
1667 				   struct cxl_switch_decoder *cxlsd,
1668 				   int nr_targets)
1669 {
1670 	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
1671 		return -EINVAL;
1672 
1673 	cxlsd->nr_targets = nr_targets;
1674 	seqlock_init(&cxlsd->target_lock);
1675 	return cxl_decoder_init(port, &cxlsd->cxld);
1676 }
1677 
1678 /**
1679  * cxl_root_decoder_alloc - Allocate a root level decoder
1680  * @port: owning CXL root of this decoder
1681  * @nr_targets: static number of downstream targets
1682  * @calc_hb: which host bridge covers the n'th position by granularity
1683  *
1684  * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
1685  * 'CXL root' decoder is one that decodes from a top-level / static platform
1686  * firmware description of CXL resources into a CXL standard decode
1687  * topology.
1688  */
1689 struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
1690 						unsigned int nr_targets,
1691 						cxl_calc_hb_fn calc_hb)
1692 {
1693 	struct cxl_root_decoder *cxlrd;
1694 	struct cxl_switch_decoder *cxlsd;
1695 	struct cxl_decoder *cxld;
1696 	int rc;
1697 
1698 	if (!is_cxl_root(port))
1699 		return ERR_PTR(-EINVAL);
1700 
1701 	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
1702 			GFP_KERNEL);
1703 	if (!cxlrd)
1704 		return ERR_PTR(-ENOMEM);
1705 
1706 	cxlsd = &cxlrd->cxlsd;
1707 	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
1708 	if (rc) {
1709 		kfree(cxlrd);
1710 		return ERR_PTR(rc);
1711 	}
1712 
1713 	cxlrd->calc_hb = calc_hb;
1714 	mutex_init(&cxlrd->range_lock);
1715 
1716 	cxld = &cxlsd->cxld;
1717 	cxld->dev.type = &cxl_decoder_root_type;
1718 	/*
1719 	 * cxl_root_decoder_release() special cases negative ids to
1720 	 * detect memregion_alloc() failures.
1721 	 */
1722 	atomic_set(&cxlrd->region_id, -1);
1723 	rc = memregion_alloc(GFP_KERNEL);
1724 	if (rc < 0) {
1725 		put_device(&cxld->dev);
1726 		return ERR_PTR(rc);
1727 	}
1728 
1729 	atomic_set(&cxlrd->region_id, rc);
1730 	return cxlrd;
1731 }
1732 EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
1733 
1734 /**
1735  * cxl_switch_decoder_alloc - Allocate a switch level decoder
1736  * @port: owning CXL switch port of this decoder
1737  * @nr_targets: max number of dynamically addressable downstream targets
1738  *
1739  * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
1740  * 'switch' decoder is any decoder that can be enumerated by PCIe
1741  * topology and the HDM Decoder Capability. This includes the decoders
1742  * that sit between Switch Upstream Ports / Switch Downstream Ports and
1743  * Host Bridges / Root Ports.
1744  */
1745 struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
1746 						    unsigned int nr_targets)
1747 {
1748 	struct cxl_switch_decoder *cxlsd;
1749 	struct cxl_decoder *cxld;
1750 	int rc;
1751 
1752 	if (is_cxl_root(port) || is_cxl_endpoint(port))
1753 		return ERR_PTR(-EINVAL);
1754 
1755 	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
1756 	if (!cxlsd)
1757 		return ERR_PTR(-ENOMEM);
1758 
1759 	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
1760 	if (rc) {
1761 		kfree(cxlsd);
1762 		return ERR_PTR(rc);
1763 	}
1764 
1765 	cxld = &cxlsd->cxld;
1766 	cxld->dev.type = &cxl_decoder_switch_type;
1767 	return cxlsd;
1768 }
1769 EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
1770 
1771 /**
1772  * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
1773  * @port: owning port of this decoder
1774  *
1775  * Return: A new cxl decoder to be registered by cxl_decoder_add()
1776  */
1777 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
1778 {
1779 	struct cxl_endpoint_decoder *cxled;
1780 	struct cxl_decoder *cxld;
1781 	int rc;
1782 
1783 	if (!is_cxl_endpoint(port))
1784 		return ERR_PTR(-EINVAL);
1785 
1786 	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
1787 	if (!cxled)
1788 		return ERR_PTR(-ENOMEM);
1789 
1790 	cxled->pos = -1;
1791 	cxld = &cxled->cxld;
1792 	rc = cxl_decoder_init(port, cxld);
1793 	if (rc)	 {
1794 		kfree(cxled);
1795 		return ERR_PTR(rc);
1796 	}
1797 
1798 	cxld->dev.type = &cxl_decoder_endpoint_type;
1799 	return cxled;
1800 }
1801 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
1802 
1803 /**
1804  * cxl_decoder_add_locked - Add a decoder with targets
1805  * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
1806  * @target_map: A list of downstream ports that this decoder can direct memory
1807  *              traffic to. These numbers should correspond to the port numbers
1808  *              in the PCIe Link Capabilities structure.
1809  *
1810  * Certain types of decoders may not have any targets. The main example of this
1811  * is an endpoint device. A more awkward example is a host bridge whose root
1812  * ports get hot-added (technically possible, though unlikely).
1813  *
1814  * This is the locked variant of cxl_decoder_add().
1815  *
1816  * Context: Process context. Expects the device lock of the port that owns the
1817  *	    @cxld to be held.
1818  *
1819  * Return: Negative error code if the decoder wasn't properly configured; else
1820  *	   returns 0.
1821  */
1822 int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
1823 {
1824 	struct cxl_port *port;
1825 	struct device *dev;
1826 	int rc;
1827 
1828 	if (WARN_ON_ONCE(!cxld))
1829 		return -EINVAL;
1830 
1831 	if (WARN_ON_ONCE(IS_ERR(cxld)))
1832 		return PTR_ERR(cxld);
1833 
1834 	if (cxld->interleave_ways < 1)
1835 		return -EINVAL;
1836 
1837 	dev = &cxld->dev;
1838 
1839 	port = to_cxl_port(cxld->dev.parent);
1840 	if (!is_endpoint_decoder(dev)) {
1841 		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
1842 
1843 		rc = decoder_populate_targets(cxlsd, port, target_map);
1844 		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
1845 			dev_err(&port->dev,
1846 				"Failed to populate active decoder targets\n");
1847 			return rc;
1848 		}
1849 	}
1850 
1851 	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
1852 	if (rc)
1853 		return rc;
1854 
1855 	return device_add(dev);
1856 }
1857 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
1858 
1859 /**
1860  * cxl_decoder_add - Add a decoder with targets
1861  * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
1862  * @target_map: A list of downstream ports that this decoder can direct memory
1863  *              traffic to. These numbers should correspond to the port numbers
1864  *              in the PCIe Link Capabilities structure.
1865  *
1866  * This is the unlocked variant of cxl_decoder_add_locked().
1867  * See cxl_decoder_add_locked().
1868  *
1869  * Context: Process context. Takes and releases the device lock of the port that
1870  *	    owns the @cxld.
1871  */
1872 int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
1873 {
1874 	struct cxl_port *port;
1875 	int rc;
1876 
1877 	if (WARN_ON_ONCE(!cxld))
1878 		return -EINVAL;
1879 
1880 	if (WARN_ON_ONCE(IS_ERR(cxld)))
1881 		return PTR_ERR(cxld);
1882 
1883 	port = to_cxl_port(cxld->dev.parent);
1884 
1885 	device_lock(&port->dev);
1886 	rc = cxl_decoder_add_locked(cxld, target_map);
1887 	device_unlock(&port->dev);
1888 
1889 	return rc;
1890 }
1891 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
1892 
1893 static void cxld_unregister(void *dev)
1894 {
1895 	struct cxl_endpoint_decoder *cxled;
1896 
1897 	if (is_endpoint_decoder(dev)) {
1898 		cxled = to_cxl_endpoint_decoder(dev);
1899 		cxl_decoder_kill_region(cxled);
1900 	}
1901 
1902 	device_unregister(dev);
1903 }
1904 
1905 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
1906 {
1907 	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
1908 }
1909 EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
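/*
 * Putting the decoder APIs together, a sketch of the typical registration
 * flow in a port driver (names illustrative, error handling elided):
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, nr_targets);
 *	... configure cxlsd->cxld (hpa_range, interleave, flags) ...
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc)
 *		put_device(&cxlsd->cxld.dev);
 *	else
 *		rc = cxl_decoder_autoremove(host, &cxlsd->cxld);
 */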
1910 
1911 /**
1912  * __cxl_driver_register - register a driver for the cxl bus
1913  * @cxl_drv: cxl driver structure to attach
1914  * @owner: owning module/driver
1915  * @modname: KBUILD_MODNAME for parent driver
1916  */
1917 int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
1918 			  const char *modname)
1919 {
1920 	if (!cxl_drv->probe) {
1921 		pr_debug("%s ->probe() must be specified\n", modname);
1922 		return -EINVAL;
1923 	}
1924 
1925 	if (!cxl_drv->name) {
1926 		pr_debug("%s ->name must be specified\n", modname);
1927 		return -EINVAL;
1928 	}
1929 
1930 	if (!cxl_drv->id) {
1931 		pr_debug("%s ->id must be specified\n", modname);
1932 		return -EINVAL;
1933 	}
1934 
1935 	cxl_drv->drv.bus = &cxl_bus_type;
1936 	cxl_drv->drv.owner = owner;
1937 	cxl_drv->drv.mod_name = modname;
1938 	cxl_drv->drv.name = cxl_drv->name;
1939 
1940 	return driver_register(&cxl_drv->drv);
1941 }
1942 EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
1943 
1944 void cxl_driver_unregister(struct cxl_driver *cxl_drv)
1945 {
1946 	driver_unregister(&cxl_drv->drv);
1947 }
1948 EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
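/*
 * A minimal consumer, assuming the cxl_driver_register() /
 * module_cxl_driver() convenience wrappers from cxl.h (sketch only):
 *
 *	static struct cxl_driver cxl_foo_driver = {
 *		.name = "cxl_foo",
 *		.probe = cxl_foo_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(cxl_foo_driver);
 *	MODULE_IMPORT_NS(CXL);
 */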
1949 
1950 static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
1951 {
1952 	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
1953 			      cxl_device_id(dev));
1954 }
1955 
1956 static int cxl_bus_match(struct device *dev, struct device_driver *drv)
1957 {
1958 	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
1959 }
1960 
1961 static int cxl_bus_probe(struct device *dev)
1962 {
1963 	int rc;
1964 
1965 	rc = to_cxl_drv(dev->driver)->probe(dev);
1966 	dev_dbg(dev, "probe: %d\n", rc);
1967 	return rc;
1968 }
1969 
1970 static void cxl_bus_remove(struct device *dev)
1971 {
1972 	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
1973 
1974 	if (cxl_drv->remove)
1975 		cxl_drv->remove(dev);
1976 }
1977 
1978 static struct workqueue_struct *cxl_bus_wq;
1979 
1980 static void cxl_bus_rescan_queue(struct work_struct *w)
1981 {
1982 	int rc = bus_rescan_devices(&cxl_bus_type);
1983 
1984 	pr_debug("CXL bus rescan result: %d\n", rc);
1985 }
1986 
1987 void cxl_bus_rescan(void)
1988 {
1989 	static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);
1990 
1991 	queue_work(cxl_bus_wq, &rescan_work);
1992 }
1993 EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);
1994 
1995 void cxl_bus_drain(void)
1996 {
1997 	drain_workqueue(cxl_bus_wq);
1998 }
1999 EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);
2000 
2001 bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
2002 {
2003 	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
2004 }
2005 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
2006 
2007 /* for user tooling to ensure port disable work has completed */
2008 static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
2009 {
2010 	if (sysfs_streq(buf, "1")) {
2011 		flush_workqueue(cxl_bus_wq);
2012 		return count;
2013 	}
2014 
2015 	return -EINVAL;
2016 }
2017 
2018 static BUS_ATTR_WO(flush);
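/*
 * Example (illustrative): user tooling writes "1" to /sys/bus/cxl/flush to
 * wait for queued memdev detach and port disable work to finish before
 * re-reading the topology.
 */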
2019 
2020 static struct attribute *cxl_bus_attributes[] = {
2021 	&bus_attr_flush.attr,
2022 	NULL,
2023 };
2024 
2025 static struct attribute_group cxl_bus_attribute_group = {
2026 	.attrs = cxl_bus_attributes,
2027 };
2028 
2029 static const struct attribute_group *cxl_bus_attribute_groups[] = {
2030 	&cxl_bus_attribute_group,
2031 	NULL,
2032 };
2033 
2034 struct bus_type cxl_bus_type = {
2035 	.name = "cxl",
2036 	.uevent = cxl_bus_uevent,
2037 	.match = cxl_bus_match,
2038 	.probe = cxl_bus_probe,
2039 	.remove = cxl_bus_remove,
2040 	.bus_groups = cxl_bus_attribute_groups,
2041 };
2042 EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
2043 
2044 static struct dentry *cxl_debugfs;
2045 
2046 struct dentry *cxl_debugfs_create_dir(const char *dir)
2047 {
2048 	return debugfs_create_dir(dir, cxl_debugfs);
2049 }
2050 EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
2051 
2052 static __init int cxl_core_init(void)
2053 {
2054 	int rc;
2055 
2056 	cxl_debugfs = debugfs_create_dir("cxl", NULL);
2057 
2058 	cxl_mbox_init();
2059 
2060 	rc = cxl_memdev_init();
2061 	if (rc)
2062 		return rc;
2063 
2064 	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
2065 	if (!cxl_bus_wq) {
2066 		rc = -ENOMEM;
2067 		goto err_wq;
2068 	}
2069 
2070 	rc = bus_register(&cxl_bus_type);
2071 	if (rc)
2072 		goto err_bus;
2073 
2074 	rc = cxl_region_init();
2075 	if (rc)
2076 		goto err_region;
2077 
2078 	return 0;
2079 
2080 err_region:
2081 	bus_unregister(&cxl_bus_type);
2082 err_bus:
2083 	destroy_workqueue(cxl_bus_wq);
2084 err_wq:
2085 	cxl_memdev_exit();
2086 	return rc;
2087 }
2088 
2089 static void cxl_core_exit(void)
2090 {
2091 	cxl_region_exit();
2092 	bus_unregister(&cxl_bus_type);
2093 	destroy_workqueue(cxl_bus_wq);
2094 	cxl_memdev_exit();
2095 	debugfs_remove_recursive(cxl_debugfs);
2096 }
2097 
2098 subsys_initcall(cxl_core_init);
2099 module_exit(cxl_core_exit);
2100 MODULE_LICENSE("GPL v2");
2101