xref: /openbmc/linux/tools/testing/cxl/test/cxl.c (revision f80be457)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/genalloc.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/acpi.h>
9 #include <linux/pci.h>
10 #include <linux/mm.h>
11 #include <cxlmem.h>
12 #include "mock.h"
13 
/* Mock topology: 2 host bridges x 2 root ports x 2 switch ports */
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8

/* Platform devices standing in for each level of the CXL hierarchy */
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
static struct platform_device
	*cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
static struct platform_device
	*cxl_switch_uport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
static struct platform_device
	*cxl_switch_dport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS *
			  NR_CXL_SWITCH_PORTS];
/* One endpoint memdev per switch downstream port; non-static (shared with mock.h users) */
struct platform_device
	*cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS];

/* Mock ACPI companions; each handle points back at its own acpi_device */
static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
};
40 
41 static bool is_mock_dev(struct device *dev)
42 {
43 	int i;
44 
45 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
46 		if (dev == &cxl_mem[i]->dev)
47 			return true;
48 	if (dev == &cxl_acpi->dev)
49 		return true;
50 	return false;
51 }
52 
53 static bool is_mock_adev(struct acpi_device *adev)
54 {
55 	int i;
56 
57 	if (adev == &acpi0017_mock)
58 		return true;
59 
60 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
61 		if (adev == &host_bridge[i])
62 			return true;
63 
64 	return false;
65 }
66 
/*
 * A hand-built CEDT: one CHBS per mock host bridge plus four CFMWS
 * windows (1-way and 2-way interleave, for volatile and pmem each).
 * __packed so the subtable layout matches what the ACPI parser expects.
 */
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	/* uid matches the host_bridge[] index returned by the mock _UID */
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	/* 1-way volatile window targeting host bridge 0 */
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* 2-way volatile window interleaved across both host bridges */
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* 1-way pmem window targeting host bridge 0 */
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* 2-way pmem window interleaved across both host bridges */
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
};
171 
/* Convenience table of pointers to each CFMWS entry embedded in mock_cedt */
struct acpi_cedt_cfmws *mock_cfmws[4] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
};
178 
/* Tracks one span of fake address space handed out from cxl_mock_pool */
struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

/* All outstanding mock allocations, protected by mock_res_lock */
static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;
187 
188 static void depopulate_all_mock_resources(void)
189 {
190 	struct cxl_mock_res *res, *_res;
191 
192 	mutex_lock(&mock_res_lock);
193 	list_for_each_entry_safe(res, _res, &mock_res, list) {
194 		gen_pool_free(cxl_mock_pool, res->range.start,
195 			      range_len(&res->range));
196 		list_del(&res->list);
197 		kfree(res);
198 	}
199 	mutex_unlock(&mock_res_lock);
200 }
201 
202 static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
203 {
204 	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
205 	struct genpool_data_align data = {
206 		.align = SZ_256M,
207 	};
208 	unsigned long phys;
209 
210 	INIT_LIST_HEAD(&res->list);
211 	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
212 				   gen_pool_first_fit_align, &data);
213 	if (!phys)
214 		return NULL;
215 
216 	res->range = (struct range) {
217 		.start = phys,
218 		.end = phys + size - 1,
219 	};
220 	mutex_lock(&mock_res_lock);
221 	list_add(&res->list, &mock_res);
222 	mutex_unlock(&mock_res_lock);
223 
224 	return res;
225 }
226 
227 static int populate_cedt(void)
228 {
229 	struct cxl_mock_res *res;
230 	int i;
231 
232 	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
233 		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
234 		resource_size_t size;
235 
236 		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
237 			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
238 		else
239 			size = ACPI_CEDT_CHBS_LENGTH_CXL11;
240 
241 		res = alloc_mock_res(size);
242 		if (!res)
243 			return -ENOMEM;
244 		chbs->base = res->range.start;
245 		chbs->length = size;
246 	}
247 
248 	for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
249 		struct acpi_cedt_cfmws *window = mock_cfmws[i];
250 
251 		res = alloc_mock_res(window->window_size);
252 		if (!res)
253 			return -ENOMEM;
254 		window->base_hpa = res->range.start;
255 	}
256 
257 	return 0;
258 }
259 
/*
 * WARNING, this hack assumes the format of 'struct
 * cxl_cfmws_context' and 'struct cxl_chbs_context' share the property that
 * the first struct member is the device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	/* device being probed; aliases the first member of either context */
	struct device *dev;
};
269 
270 static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
271 				      acpi_tbl_entry_handler_arg handler_arg,
272 				      void *arg)
273 {
274 	struct cxl_cedt_context *ctx = arg;
275 	struct device *dev = ctx->dev;
276 	union acpi_subtable_headers *h;
277 	unsigned long end;
278 	int i;
279 
280 	if (dev != &cxl_acpi->dev)
281 		return acpi_table_parse_cedt(id, handler_arg, arg);
282 
283 	if (id == ACPI_CEDT_TYPE_CHBS)
284 		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
285 			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
286 			end = (unsigned long)&mock_cedt.chbs[i + 1];
287 			handler_arg(h, arg, end);
288 		}
289 
290 	if (id == ACPI_CEDT_TYPE_CFMWS)
291 		for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
292 			h = (union acpi_subtable_headers *) mock_cfmws[i];
293 			end = (unsigned long) h + mock_cfmws[i]->header.length;
294 			handler_arg(h, arg, end);
295 		}
296 
297 	return 0;
298 }
299 
300 static bool is_mock_bridge(struct device *dev)
301 {
302 	int i;
303 
304 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
305 		if (dev == &cxl_host_bridge[i]->dev)
306 			return true;
307 	return false;
308 }
309 
310 static bool is_mock_port(struct device *dev)
311 {
312 	int i;
313 
314 	if (is_mock_bridge(dev))
315 		return true;
316 
317 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
318 		if (dev == &cxl_root_port[i]->dev)
319 			return true;
320 
321 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
322 		if (dev == &cxl_switch_uport[i]->dev)
323 			return true;
324 
325 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
326 		if (dev == &cxl_switch_dport[i]->dev)
327 			return true;
328 
329 	if (is_cxl_memdev(dev))
330 		return is_mock_dev(dev->parent);
331 
332 	return false;
333 }
334 
/* Map a mock acpi_device back to its host_bridge[] index (== its _UID) */
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}
339 
340 static struct acpi_device *find_host_bridge(acpi_handle handle)
341 {
342 	int i;
343 
344 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
345 		if (handle == host_bridge[i].handle)
346 			return &host_bridge[i];
347 	return NULL;
348 }
349 
350 static acpi_status
351 mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
352 			   struct acpi_object_list *arguments,
353 			   unsigned long long *data)
354 {
355 	struct acpi_device *adev = find_host_bridge(handle);
356 
357 	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
358 		return acpi_evaluate_integer(handle, pathname, arguments, data);
359 
360 	*data = host_bridge_index(adev);
361 	return AE_OK;
362 }
363 
/* One fake PCI bus / root per mock host bridge */
static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES];
static struct acpi_pci_root mock_pci_root[NR_CXL_HOST_BRIDGES] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
};
373 
374 static bool is_mock_bus(struct pci_bus *bus)
375 {
376 	int i;
377 
378 	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
379 		if (bus == &mock_pci_bus[i])
380 			return true;
381 	return false;
382 }
383 
384 static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
385 {
386 	struct acpi_device *adev = find_host_bridge(handle);
387 
388 	if (!adev)
389 		return acpi_pci_find_root(handle);
390 	return &mock_pci_root[host_bridge_index(adev)];
391 }
392 
393 static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port)
394 {
395 	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
396 
397 	if (!cxlhdm)
398 		return ERR_PTR(-ENOMEM);
399 
400 	cxlhdm->port = port;
401 	return cxlhdm;
402 }
403 
/*
 * cxl_test always emulates HDM decoders, so a passthrough decoder
 * should never be requested; fail loudly if it is.
 */
static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}
409 
410 
/* Cursor state for map_targets(): fills target_map up to target_count */
struct target_map_ctx {
	int *target_map;   /* output array of downstream port ids */
	int index;         /* next slot to fill */
	int target_count;  /* capacity of target_map actually in use */
};
416 
417 static int map_targets(struct device *dev, void *data)
418 {
419 	struct platform_device *pdev = to_platform_device(dev);
420 	struct target_map_ctx *ctx = data;
421 
422 	ctx->target_map[ctx->index++] = pdev->id;
423 
424 	if (ctx->index > ctx->target_count) {
425 		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
426 		return -ENXIO;
427 	}
428 
429 	return 0;
430 }
431 
432 static int mock_decoder_commit(struct cxl_decoder *cxld)
433 {
434 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
435 	int id = cxld->id;
436 
437 	if (cxld->flags & CXL_DECODER_F_ENABLE)
438 		return 0;
439 
440 	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
441 	if (port->commit_end + 1 != id) {
442 		dev_dbg(&port->dev,
443 			"%s: out of order commit, expected decoder%d.%d\n",
444 			dev_name(&cxld->dev), port->id, port->commit_end + 1);
445 		return -EBUSY;
446 	}
447 
448 	port->commit_end++;
449 	cxld->flags |= CXL_DECODER_F_ENABLE;
450 
451 	return 0;
452 }
453 
454 static int mock_decoder_reset(struct cxl_decoder *cxld)
455 {
456 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
457 	int id = cxld->id;
458 
459 	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
460 		return 0;
461 
462 	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
463 	if (port->commit_end != id) {
464 		dev_dbg(&port->dev,
465 			"%s: out of order reset, expected decoder%d.%d\n",
466 			dev_name(&cxld->dev), port->id, port->commit_end);
467 		return -EBUSY;
468 	}
469 
470 	port->commit_end--;
471 	cxld->flags &= ~CXL_DECODER_F_ENABLE;
472 
473 	return 0;
474 }
475 
/*
 * Stand-in for devm_cxl_enumerate_decoders(): fabricate
 * NR_CXL_PORT_DECODERS decoders for @cxlhdm's port. Endpoints get
 * endpoint decoders (no targets); switch/root-attached ports get
 * switch decoders whose targets are the port's child platform devices.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	/* target_count == 0 selects the endpoint decoder path below */
	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		/* start with an empty/invalid HPA range */
		cxld->hpa_range = (struct range) {
			.start = 0,
			.end = -1,
		};

		cxld->interleave_ways = min_not_zero(target_count, 1);
		cxld->interleave_granularity = SZ_4K;
		cxld->target_type = CXL_DECODER_EXPANDER;
		cxld->commit = mock_decoder_commit;
		cxld->reset = mock_decoder_reset;

		/* populate target_map from the uport's child devices */
		if (target_count) {
			rc = device_for_each_child(port->uport, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		/* on failure the decoder device reference must be dropped */
		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		/* after this, teardown is owned by devm */
		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
556 
557 static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
558 {
559 	struct device *dev = &port->dev;
560 	struct platform_device **array;
561 	int i, array_size;
562 
563 	if (port->depth == 1) {
564 		array_size = ARRAY_SIZE(cxl_root_port);
565 		array = cxl_root_port;
566 	} else if (port->depth == 2) {
567 		array_size = ARRAY_SIZE(cxl_switch_dport);
568 		array = cxl_switch_dport;
569 	} else {
570 		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
571 			      port->depth);
572 		return -ENXIO;
573 	}
574 
575 	for (i = 0; i < array_size; i++) {
576 		struct platform_device *pdev = array[i];
577 		struct cxl_dport *dport;
578 
579 		if (pdev->dev.parent != port->uport)
580 			continue;
581 
582 		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
583 					   CXL_RESOURCE_NONE);
584 
585 		if (IS_ERR(dport)) {
586 			dev_err(dev, "failed to add dport: %s (%ld)\n",
587 				dev_name(&pdev->dev), PTR_ERR(dport));
588 			return PTR_ERR(dport);
589 		}
590 
591 		dev_dbg(dev, "add dport%d: %s\n", pdev->id,
592 			dev_name(&pdev->dev));
593 	}
594 
595 	return 0;
596 }
597 
/* Override table hooked in via register_cxl_mock_ops() in cxl_test_init() */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
613 
/*
 * Attach a mock acpi_device as @dev's ACPI companion: initialize the
 * embedded device and fwnode, then cross-link the two.
 */
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}
621 
/* Fallbacks for configs whose linux/sizes.h lacks these definitions */
#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif
629 
630 static __init int cxl_test_init(void)
631 {
632 	int rc, i;
633 
634 	register_cxl_mock_ops(&cxl_mock_ops);
635 
636 	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
637 	if (!cxl_mock_pool) {
638 		rc = -ENOMEM;
639 		goto err_gen_pool_create;
640 	}
641 
642 	rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
643 			  SZ_64G, NUMA_NO_NODE);
644 	if (rc)
645 		goto err_gen_pool_add;
646 
647 	rc = populate_cedt();
648 	if (rc)
649 		goto err_populate;
650 
651 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
652 		struct acpi_device *adev = &host_bridge[i];
653 		struct platform_device *pdev;
654 
655 		pdev = platform_device_alloc("cxl_host_bridge", i);
656 		if (!pdev)
657 			goto err_bridge;
658 
659 		mock_companion(adev, &pdev->dev);
660 		rc = platform_device_add(pdev);
661 		if (rc) {
662 			platform_device_put(pdev);
663 			goto err_bridge;
664 		}
665 
666 		cxl_host_bridge[i] = pdev;
667 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
668 				       "physical_node");
669 		if (rc)
670 			goto err_bridge;
671 	}
672 
673 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
674 		struct platform_device *bridge =
675 			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
676 		struct platform_device *pdev;
677 
678 		pdev = platform_device_alloc("cxl_root_port", i);
679 		if (!pdev)
680 			goto err_port;
681 		pdev->dev.parent = &bridge->dev;
682 
683 		rc = platform_device_add(pdev);
684 		if (rc) {
685 			platform_device_put(pdev);
686 			goto err_port;
687 		}
688 		cxl_root_port[i] = pdev;
689 	}
690 
691 	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
692 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
693 		struct platform_device *root_port = cxl_root_port[i];
694 		struct platform_device *pdev;
695 
696 		pdev = platform_device_alloc("cxl_switch_uport", i);
697 		if (!pdev)
698 			goto err_port;
699 		pdev->dev.parent = &root_port->dev;
700 
701 		rc = platform_device_add(pdev);
702 		if (rc) {
703 			platform_device_put(pdev);
704 			goto err_uport;
705 		}
706 		cxl_switch_uport[i] = pdev;
707 	}
708 
709 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
710 		struct platform_device *uport =
711 			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
712 		struct platform_device *pdev;
713 
714 		pdev = platform_device_alloc("cxl_switch_dport", i);
715 		if (!pdev)
716 			goto err_port;
717 		pdev->dev.parent = &uport->dev;
718 
719 		rc = platform_device_add(pdev);
720 		if (rc) {
721 			platform_device_put(pdev);
722 			goto err_dport;
723 		}
724 		cxl_switch_dport[i] = pdev;
725 	}
726 
727 	BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_switch_dport));
728 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
729 		struct platform_device *dport = cxl_switch_dport[i];
730 		struct platform_device *pdev;
731 
732 		pdev = platform_device_alloc("cxl_mem", i);
733 		if (!pdev)
734 			goto err_mem;
735 		pdev->dev.parent = &dport->dev;
736 		set_dev_node(&pdev->dev, i % 2);
737 
738 		rc = platform_device_add(pdev);
739 		if (rc) {
740 			platform_device_put(pdev);
741 			goto err_mem;
742 		}
743 		cxl_mem[i] = pdev;
744 	}
745 
746 	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
747 	if (!cxl_acpi)
748 		goto err_mem;
749 
750 	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
751 	acpi0017_mock.dev.bus = &platform_bus_type;
752 
753 	rc = platform_device_add(cxl_acpi);
754 	if (rc)
755 		goto err_add;
756 
757 	return 0;
758 
759 err_add:
760 	platform_device_put(cxl_acpi);
761 err_mem:
762 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
763 		platform_device_unregister(cxl_mem[i]);
764 err_dport:
765 	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
766 		platform_device_unregister(cxl_switch_dport[i]);
767 err_uport:
768 	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
769 		platform_device_unregister(cxl_switch_uport[i]);
770 err_port:
771 	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
772 		platform_device_unregister(cxl_root_port[i]);
773 err_bridge:
774 	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
775 		struct platform_device *pdev = cxl_host_bridge[i];
776 
777 		if (!pdev)
778 			continue;
779 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
780 		platform_device_unregister(cxl_host_bridge[i]);
781 	}
782 err_populate:
783 	depopulate_all_mock_resources();
784 err_gen_pool_add:
785 	gen_pool_destroy(cxl_mock_pool);
786 err_gen_pool_create:
787 	unregister_cxl_mock_ops(&cxl_mock_ops);
788 	return rc;
789 }
790 
/*
 * Tear down the mock topology in strict reverse order of creation:
 * cxl_acpi first, then memdevs, dports, uports, root ports, bridges,
 * and finally the mock address pool and ops registration.
 */
static __exit void cxl_test_exit(void)
{
	int i;

	platform_device_unregister(cxl_acpi);
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		/* drop the self-link created at init before unregistering */
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}
816 
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
/* pulls in namespaced symbols from the ACPI and CXL cores */
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);
822