xref: /openbmc/linux/tools/testing/cxl/test/cxl.c (revision 15a83487)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/genalloc.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/acpi.h>
9 #include <linux/pci.h>
10 #include <linux/mm.h>
11 #include <cxlmem.h>
12 #include "mock.h"
13 
/*
 * Mock topology parameters: two "multi" host bridges, each with two root
 * ports, each root port leading to a switch uport with two downstream
 * ports; plus one "single" host bridge with one root port and one switch.
 */
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8

static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

/* Devices modeling the single-host-bridge topology */
static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

/* Non-static: presumably shared with other cxl_test modules — confirm */
struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
37 
38 
39 static inline bool is_multi_bridge(struct device *dev)
40 {
41 	int i;
42 
43 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
44 		if (&cxl_host_bridge[i]->dev == dev)
45 			return true;
46 	return false;
47 }
48 
49 static inline bool is_single_bridge(struct device *dev)
50 {
51 	int i;
52 
53 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
54 		if (&cxl_hb_single[i]->dev == dev)
55 			return true;
56 	return false;
57 }
58 
static struct acpi_device acpi0017_mock;
/*
 * Mock ACPI companions for the host bridges: indices 0..1 back the
 * multi-port bridges, index 2 backs the single bridge.  Each entry uses
 * its own address as the (fake) ACPI handle so find_host_bridge() can
 * resolve handle -> device by simple pointer comparison.
 */
static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
	[2] = {
		.handle = &host_bridge[2],
	},

};
72 
73 static bool is_mock_dev(struct device *dev)
74 {
75 	int i;
76 
77 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
78 		if (dev == &cxl_mem[i]->dev)
79 			return true;
80 	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
81 		if (dev == &cxl_mem_single[i]->dev)
82 			return true;
83 	if (dev == &cxl_acpi->dev)
84 		return true;
85 	return false;
86 }
87 
88 static bool is_mock_adev(struct acpi_device *adev)
89 {
90 	int i;
91 
92 	if (adev == &acpi0017_mock)
93 		return true;
94 
95 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
96 		if (adev == &host_bridge[i])
97 			return true;
98 
99 	return false;
100 }
101 
/*
 * Mock CEDT payload: one CHBS per mock host bridge plus five CFMWS
 * windows.  cfmws0/cfmws1 are volatile windows and cfmws2/cfmws3 are
 * pmem windows over the two multi-port host bridges (single target and
 * two targets respectively); cfmws4 is a pmem window over the single
 * host bridge (target 2).
 *
 * NOTE(review): interleave_ways appears to hold the CFMWS encoded form
 * (0 -> 1 way, 1 -> 2 ways), consistent with each target[] size — confirm
 * against the ACPI CEDT definition.
 */
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	/* One CXL 2.0 host bridge structure per mock bridge, uid == index */
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	/* Volatile window, single target (bridge 0) */
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* Volatile window interleaved across bridges 0 and 1 */
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* Pmem window, single target (bridge 0) */
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	/* Pmem window interleaved across bridges 0 and 1 */
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	/* Pmem window over the single host bridge (bridge 2) */
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 4,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
};
233 
/* Flat view of the CFMWS entries for iteration by the mock parser */
struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
};
241 
/* Tracks one fake physical address span handed out from cxl_mock_pool */
struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

/* All outstanding mock allocations, protected by mock_res_lock */
static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
/* Pool of fake physical address space (populated in cxl_test_init()) */
static struct gen_pool *cxl_mock_pool;
250 
251 static void depopulate_all_mock_resources(void)
252 {
253 	struct cxl_mock_res *res, *_res;
254 
255 	mutex_lock(&mock_res_lock);
256 	list_for_each_entry_safe(res, _res, &mock_res, list) {
257 		gen_pool_free(cxl_mock_pool, res->range.start,
258 			      range_len(&res->range));
259 		list_del(&res->list);
260 		kfree(res);
261 	}
262 	mutex_unlock(&mock_res_lock);
263 }
264 
265 static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
266 {
267 	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
268 	struct genpool_data_align data = {
269 		.align = SZ_256M,
270 	};
271 	unsigned long phys;
272 
273 	INIT_LIST_HEAD(&res->list);
274 	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
275 				   gen_pool_first_fit_align, &data);
276 	if (!phys)
277 		return NULL;
278 
279 	res->range = (struct range) {
280 		.start = phys,
281 		.end = phys + size - 1,
282 	};
283 	mutex_lock(&mock_res_lock);
284 	list_add(&res->list, &mock_res);
285 	mutex_unlock(&mock_res_lock);
286 
287 	return res;
288 }
289 
/*
 * Assign fake base addresses to every CHBS register block and CFMWS
 * window in mock_cedt, backed by spans from cxl_mock_pool.
 *
 * Returns 0 on success, -ENOMEM if the pool runs dry.  Partially
 * populated state is cleaned up by depopulate_all_mock_resources().
 */
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		/* Register block size depends on the bridge's CXL version */
		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		res = alloc_mock_res(size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}
322 
/*
 * WARNING, this hack assumes the format of 'struct
 * cxl_cfmws_context' and 'struct cxl_chbs_context' share the property that
 * the first struct member is the device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	struct device *dev;	/* aliases the first member of both contexts */
};
332 
333 static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
334 				      acpi_tbl_entry_handler_arg handler_arg,
335 				      void *arg)
336 {
337 	struct cxl_cedt_context *ctx = arg;
338 	struct device *dev = ctx->dev;
339 	union acpi_subtable_headers *h;
340 	unsigned long end;
341 	int i;
342 
343 	if (dev != &cxl_acpi->dev)
344 		return acpi_table_parse_cedt(id, handler_arg, arg);
345 
346 	if (id == ACPI_CEDT_TYPE_CHBS)
347 		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
348 			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
349 			end = (unsigned long)&mock_cedt.chbs[i + 1];
350 			handler_arg(h, arg, end);
351 		}
352 
353 	if (id == ACPI_CEDT_TYPE_CFMWS)
354 		for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
355 			h = (union acpi_subtable_headers *) mock_cfmws[i];
356 			end = (unsigned long) h + mock_cfmws[i]->header.length;
357 			handler_arg(h, arg, end);
358 		}
359 
360 	return 0;
361 }
362 
363 static bool is_mock_bridge(struct device *dev)
364 {
365 	int i;
366 
367 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
368 		if (dev == &cxl_host_bridge[i]->dev)
369 			return true;
370 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
371 		if (dev == &cxl_hb_single[i]->dev)
372 			return true;
373 	return false;
374 }
375 
376 static bool is_mock_port(struct device *dev)
377 {
378 	int i;
379 
380 	if (is_mock_bridge(dev))
381 		return true;
382 
383 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
384 		if (dev == &cxl_root_port[i]->dev)
385 			return true;
386 
387 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
388 		if (dev == &cxl_switch_uport[i]->dev)
389 			return true;
390 
391 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
392 		if (dev == &cxl_switch_dport[i]->dev)
393 			return true;
394 
395 	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
396 		if (dev == &cxl_root_single[i]->dev)
397 			return true;
398 
399 	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
400 		if (dev == &cxl_swu_single[i]->dev)
401 			return true;
402 
403 	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
404 		if (dev == &cxl_swd_single[i]->dev)
405 			return true;
406 
407 	if (is_cxl_memdev(dev))
408 		return is_mock_dev(dev->parent);
409 
410 	return false;
411 }
412 
/* Convert a mock bridge companion to its array index (doubles as _UID) */
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}
417 
418 static struct acpi_device *find_host_bridge(acpi_handle handle)
419 {
420 	int i;
421 
422 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
423 		if (handle == host_bridge[i].handle)
424 			return &host_bridge[i];
425 	return NULL;
426 }
427 
428 static acpi_status
429 mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
430 			   struct acpi_object_list *arguments,
431 			   unsigned long long *data)
432 {
433 	struct acpi_device *adev = find_host_bridge(handle);
434 
435 	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
436 		return acpi_evaluate_integer(handle, pathname, arguments, data);
437 
438 	*data = host_bridge_index(adev);
439 	return AE_OK;
440 }
441 
static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST];
/* One mock PCI root per host bridge, indexed like host_bridge[] */
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},

};
455 
456 static bool is_mock_bus(struct pci_bus *bus)
457 {
458 	int i;
459 
460 	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
461 		if (bus == &mock_pci_bus[i])
462 			return true;
463 	return false;
464 }
465 
466 static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
467 {
468 	struct acpi_device *adev = find_host_bridge(handle);
469 
470 	if (!adev)
471 		return acpi_pci_find_root(handle);
472 	return &mock_pci_root[host_bridge_index(adev)];
473 }
474 
475 static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port)
476 {
477 	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
478 
479 	if (!cxlhdm)
480 		return ERR_PTR(-ENOMEM);
481 
482 	cxlhdm->port = port;
483 	return cxlhdm;
484 }
485 
486 static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
487 {
488 	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
489 	return -EOPNOTSUPP;
490 }
491 
492 
/* Cursor state for map_targets() while filling a decoder target list */
struct target_map_ctx {
	int *target_map;	/* output: one port id per target slot */
	int index;		/* next slot to fill */
	int target_count;	/* number of usable slots in target_map */
};
498 
499 static int map_targets(struct device *dev, void *data)
500 {
501 	struct platform_device *pdev = to_platform_device(dev);
502 	struct target_map_ctx *ctx = data;
503 
504 	ctx->target_map[ctx->index++] = pdev->id;
505 
506 	if (ctx->index > ctx->target_count) {
507 		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
508 		return -ENXIO;
509 	}
510 
511 	return 0;
512 }
513 
514 static int mock_decoder_commit(struct cxl_decoder *cxld)
515 {
516 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
517 	int id = cxld->id;
518 
519 	if (cxld->flags & CXL_DECODER_F_ENABLE)
520 		return 0;
521 
522 	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
523 	if (port->commit_end + 1 != id) {
524 		dev_dbg(&port->dev,
525 			"%s: out of order commit, expected decoder%d.%d\n",
526 			dev_name(&cxld->dev), port->id, port->commit_end + 1);
527 		return -EBUSY;
528 	}
529 
530 	port->commit_end++;
531 	cxld->flags |= CXL_DECODER_F_ENABLE;
532 
533 	return 0;
534 }
535 
536 static int mock_decoder_reset(struct cxl_decoder *cxld)
537 {
538 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
539 	int id = cxld->id;
540 
541 	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
542 		return 0;
543 
544 	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
545 	if (port->commit_end != id) {
546 		dev_dbg(&port->dev,
547 			"%s: out of order reset, expected decoder%d.%d\n",
548 			dev_name(&cxld->dev), port->id, port->commit_end);
549 		return -EBUSY;
550 	}
551 
552 	port->commit_end--;
553 	cxld->flags &= ~CXL_DECODER_F_ENABLE;
554 
555 	return 0;
556 }
557 
/*
 * Mock replacement for devm_cxl_enumerate_decoders(): instead of reading
 * HDM decoder registers, fabricate NR_CXL_PORT_DECODERS decoders for the
 * port.  Switch/root ports get switch decoders whose target list is
 * built from the port's child devices; endpoints get endpoint decoders.
 *
 * Returns 0 on success or a negative errno from allocation/registration.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	/*
	 * Endpoint decoders have no targets; ports directly below the
	 * root fan out to root ports, all others to switch ports.
	 */
	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		/* Switch decoder for fan-out ports, endpoint otherwise */
		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		/* Start with an empty/invalid HPA range */
		cxld->hpa_range = (struct range) {
			.start = 0,
			.end = -1,
		};

		cxld->interleave_ways = min_not_zero(target_count, 1);
		cxld->interleave_granularity = SZ_4K;
		cxld->target_type = CXL_DECODER_EXPANDER;
		cxld->commit = mock_decoder_commit;
		cxld->reset = mock_decoder_reset;

		if (target_count) {
			/* Record each child port id in the target map */
			rc = device_for_each_child(port->uport, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		/* Hand lifetime management over to the port device */
		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
638 
/*
 * Mock replacement for devm_cxl_port_enumerate_dports(): instead of
 * walking a PCI hierarchy, select the platform device array that models
 * this port's downstream ports based on its depth and topology (multi
 * vs. single bridge), then register each matching entry as a dport.
 *
 * Returns 0 on success or a negative errno.
 */
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		/* Depth 1: root ports directly below a host bridge */
		if (is_multi_bridge(port->uport)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		/* Depth 2: switch downstream ports */
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		/* Only children of this port's uport belong to it */
		if (pdev->dev.parent != port->uport) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport))
			return PTR_ERR(dport);
	}

	return 0;
}
696 
/*
 * Override table registered with the cxl_test mock infrastructure so
 * that ACPI and CXL core entry points are diverted to the fakes above
 * when they target mock devices.
 */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
712 
/*
 * Wire @adev up as the ACPI companion of @dev: initialize the mock
 * acpi_device and cross-link its fwnode with the platform device.
 */
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}
720 
/* Fallback size constants for configurations where they are undefined */
#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif
728 
729 static __init int cxl_single_init(void)
730 {
731 	int i, rc;
732 
733 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
734 		struct acpi_device *adev =
735 			&host_bridge[NR_CXL_HOST_BRIDGES + i];
736 		struct platform_device *pdev;
737 
738 		pdev = platform_device_alloc("cxl_host_bridge",
739 					     NR_CXL_HOST_BRIDGES + i);
740 		if (!pdev)
741 			goto err_bridge;
742 
743 		mock_companion(adev, &pdev->dev);
744 		rc = platform_device_add(pdev);
745 		if (rc) {
746 			platform_device_put(pdev);
747 			goto err_bridge;
748 		}
749 
750 		cxl_hb_single[i] = pdev;
751 		mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
752 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
753 				       "physical_node");
754 		if (rc)
755 			goto err_bridge;
756 	}
757 
758 	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
759 		struct platform_device *bridge =
760 			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
761 		struct platform_device *pdev;
762 
763 		pdev = platform_device_alloc("cxl_root_port",
764 					     NR_MULTI_ROOT + i);
765 		if (!pdev)
766 			goto err_port;
767 		pdev->dev.parent = &bridge->dev;
768 
769 		rc = platform_device_add(pdev);
770 		if (rc) {
771 			platform_device_put(pdev);
772 			goto err_port;
773 		}
774 		cxl_root_single[i] = pdev;
775 	}
776 
777 	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
778 		struct platform_device *root_port = cxl_root_single[i];
779 		struct platform_device *pdev;
780 
781 		pdev = platform_device_alloc("cxl_switch_uport",
782 					     NR_MULTI_ROOT + i);
783 		if (!pdev)
784 			goto err_uport;
785 		pdev->dev.parent = &root_port->dev;
786 
787 		rc = platform_device_add(pdev);
788 		if (rc) {
789 			platform_device_put(pdev);
790 			goto err_uport;
791 		}
792 		cxl_swu_single[i] = pdev;
793 	}
794 
795 	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
796 		struct platform_device *uport =
797 			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
798 		struct platform_device *pdev;
799 
800 		pdev = platform_device_alloc("cxl_switch_dport",
801 					     i + NR_MEM_MULTI);
802 		if (!pdev)
803 			goto err_dport;
804 		pdev->dev.parent = &uport->dev;
805 
806 		rc = platform_device_add(pdev);
807 		if (rc) {
808 			platform_device_put(pdev);
809 			goto err_dport;
810 		}
811 		cxl_swd_single[i] = pdev;
812 	}
813 
814 	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
815 		struct platform_device *dport = cxl_swd_single[i];
816 		struct platform_device *pdev;
817 
818 		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
819 		if (!pdev)
820 			goto err_mem;
821 		pdev->dev.parent = &dport->dev;
822 		set_dev_node(&pdev->dev, i % 2);
823 
824 		rc = platform_device_add(pdev);
825 		if (rc) {
826 			platform_device_put(pdev);
827 			goto err_mem;
828 		}
829 		cxl_mem_single[i] = pdev;
830 	}
831 
832 	return 0;
833 
834 err_mem:
835 	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
836 		platform_device_unregister(cxl_mem_single[i]);
837 err_dport:
838 	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
839 		platform_device_unregister(cxl_swd_single[i]);
840 err_uport:
841 	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
842 		platform_device_unregister(cxl_swu_single[i]);
843 err_port:
844 	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
845 		platform_device_unregister(cxl_root_single[i]);
846 err_bridge:
847 	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
848 		struct platform_device *pdev = cxl_hb_single[i];
849 
850 		if (!pdev)
851 			continue;
852 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
853 		platform_device_unregister(cxl_hb_single[i]);
854 	}
855 
856 	return rc;
857 }
858 
/* Tear down the single-bridge topology in reverse registration order */
static void cxl_single_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		/* Drop the self-link created during cxl_single_init() */
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}
}
880 
881 static __init int cxl_test_init(void)
882 {
883 	int rc, i;
884 
885 	register_cxl_mock_ops(&cxl_mock_ops);
886 
887 	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
888 	if (!cxl_mock_pool) {
889 		rc = -ENOMEM;
890 		goto err_gen_pool_create;
891 	}
892 
893 	rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
894 			  SZ_64G, NUMA_NO_NODE);
895 	if (rc)
896 		goto err_gen_pool_add;
897 
898 	rc = populate_cedt();
899 	if (rc)
900 		goto err_populate;
901 
902 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
903 		struct acpi_device *adev = &host_bridge[i];
904 		struct platform_device *pdev;
905 
906 		pdev = platform_device_alloc("cxl_host_bridge", i);
907 		if (!pdev)
908 			goto err_bridge;
909 
910 		mock_companion(adev, &pdev->dev);
911 		rc = platform_device_add(pdev);
912 		if (rc) {
913 			platform_device_put(pdev);
914 			goto err_bridge;
915 		}
916 
917 		cxl_host_bridge[i] = pdev;
918 		mock_pci_bus[i].bridge = &pdev->dev;
919 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
920 				       "physical_node");
921 		if (rc)
922 			goto err_bridge;
923 	}
924 
925 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
926 		struct platform_device *bridge =
927 			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
928 		struct platform_device *pdev;
929 
930 		pdev = platform_device_alloc("cxl_root_port", i);
931 		if (!pdev)
932 			goto err_port;
933 		pdev->dev.parent = &bridge->dev;
934 
935 		rc = platform_device_add(pdev);
936 		if (rc) {
937 			platform_device_put(pdev);
938 			goto err_port;
939 		}
940 		cxl_root_port[i] = pdev;
941 	}
942 
943 	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
944 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
945 		struct platform_device *root_port = cxl_root_port[i];
946 		struct platform_device *pdev;
947 
948 		pdev = platform_device_alloc("cxl_switch_uport", i);
949 		if (!pdev)
950 			goto err_uport;
951 		pdev->dev.parent = &root_port->dev;
952 
953 		rc = platform_device_add(pdev);
954 		if (rc) {
955 			platform_device_put(pdev);
956 			goto err_uport;
957 		}
958 		cxl_switch_uport[i] = pdev;
959 	}
960 
961 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
962 		struct platform_device *uport =
963 			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
964 		struct platform_device *pdev;
965 
966 		pdev = platform_device_alloc("cxl_switch_dport", i);
967 		if (!pdev)
968 			goto err_dport;
969 		pdev->dev.parent = &uport->dev;
970 
971 		rc = platform_device_add(pdev);
972 		if (rc) {
973 			platform_device_put(pdev);
974 			goto err_dport;
975 		}
976 		cxl_switch_dport[i] = pdev;
977 	}
978 
979 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
980 		struct platform_device *dport = cxl_switch_dport[i];
981 		struct platform_device *pdev;
982 
983 		pdev = platform_device_alloc("cxl_mem", i);
984 		if (!pdev)
985 			goto err_mem;
986 		pdev->dev.parent = &dport->dev;
987 		set_dev_node(&pdev->dev, i % 2);
988 
989 		rc = platform_device_add(pdev);
990 		if (rc) {
991 			platform_device_put(pdev);
992 			goto err_mem;
993 		}
994 		cxl_mem[i] = pdev;
995 	}
996 
997 	rc = cxl_single_init();
998 	if (rc)
999 		goto err_mem;
1000 
1001 	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
1002 	if (!cxl_acpi)
1003 		goto err_single;
1004 
1005 	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
1006 	acpi0017_mock.dev.bus = &platform_bus_type;
1007 
1008 	rc = platform_device_add(cxl_acpi);
1009 	if (rc)
1010 		goto err_add;
1011 
1012 	return 0;
1013 
1014 err_add:
1015 	platform_device_put(cxl_acpi);
1016 err_single:
1017 	cxl_single_exit();
1018 err_mem:
1019 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1020 		platform_device_unregister(cxl_mem[i]);
1021 err_dport:
1022 	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1023 		platform_device_unregister(cxl_switch_dport[i]);
1024 err_uport:
1025 	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1026 		platform_device_unregister(cxl_switch_uport[i]);
1027 err_port:
1028 	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1029 		platform_device_unregister(cxl_root_port[i]);
1030 err_bridge:
1031 	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1032 		struct platform_device *pdev = cxl_host_bridge[i];
1033 
1034 		if (!pdev)
1035 			continue;
1036 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1037 		platform_device_unregister(cxl_host_bridge[i]);
1038 	}
1039 err_populate:
1040 	depopulate_all_mock_resources();
1041 err_gen_pool_add:
1042 	gen_pool_destroy(cxl_mock_pool);
1043 err_gen_pool_create:
1044 	unregister_cxl_mock_ops(&cxl_mock_ops);
1045 	return rc;
1046 }
1047 
/* Tear down everything cxl_test_init() built, in reverse order */
static __exit void cxl_test_exit(void)
{
	int i;

	platform_device_unregister(cxl_acpi);
	cxl_single_exit();
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		/* Drop the self-link created during cxl_test_init() */
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}
1074 
1075 module_init(cxl_test_init);
1076 module_exit(cxl_test_exit);
1077 MODULE_LICENSE("GPL v2");
1078 MODULE_IMPORT_NS(ACPI);
1079 MODULE_IMPORT_NS(CXL);
1080