// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <cxlmem.h>
#include "mock.h"

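/*
 * cxl_test topology: two "multi" host bridges with two root ports each,
 * plus one "single" host bridge with a lone root port.  Every root port
 * hosts a switch (uport) with two downstream ports, and every switch
 * dport hosts one cxl_mem endpoint device.
 */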
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8

static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];

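/*
 * Helpers to distinguish the two mock host-bridge flavors: bridges that
 * fan out to multiple root ports vs. the single-root-port bridge.
 */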
static inline bool is_multi_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (&cxl_host_bridge[i]->dev == dev)
			return true;
	return false;
}

static inline bool is_single_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (&cxl_hb_single[i]->dev == dev)
			return true;
	return false;
}

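/*
 * Mock ACPI companions for the cxl_acpi driver.  Each host bridge entry
 * uses its own address as the acpi_handle so that find_host_bridge() can
 * map a handle back to its device.
 */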
static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
	[2] = {
		.handle = &host_bridge[2],
	},
};

static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
		if (dev == &cxl_mem_single[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}

static bool is_mock_adev(struct acpi_device *adev)
{
	int i;

	if (adev == &acpi0017_mock)
		return true;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (adev == &host_bridge[i])
			return true;

	return false;
}

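/*
 * Mock CEDT: one CHBS per mock host bridge, plus five CFMWS windows
 * covering single- and dual-target, volatile and persistent capacity.
 */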
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 4,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
};

struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
};

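/*
 * Fake physical address ranges backing the mock CHBS register blocks and
 * CFMWS windows, carved out of a genalloc pool set up in cxl_test_init().
 */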
struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;

static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}

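/*
 * Carve a 256MB-aligned range of @size bytes out of the mock address pool
 * and track it on @mock_res for later teardown.
 */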
static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = SZ_256M,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}

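/*
 * Assign mock physical base addresses to every CHBS register block and
 * CFMWS window in the mock CEDT.
 */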
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		res = alloc_mock_res(size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}

/*
 * WARNING: this hack assumes that 'struct cxl_cfmws_context' and
 * 'struct cxl_chbs_context' share the property that their first member
 * is the device being probed by the cxl_acpi driver.
 */
struct cxl_cedt_context {
	struct device *dev;
};

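/*
 * Stand-in for acpi_table_parse_cedt(): when the caller is the mock
 * cxl_acpi device, walk the mock CEDT subtables instead of the firmware
 * tables and invoke @handler_arg on each entry.
 */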
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (dev != &cxl_acpi->dev)
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}

static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (dev == &cxl_hb_single[i]->dev)
			return true;
	return false;
}

static bool is_mock_port(struct device *dev)
{
	int i;

	if (is_mock_bridge(dev))
		return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (dev == &cxl_root_port[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
		if (dev == &cxl_switch_uport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
		if (dev == &cxl_switch_dport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
		if (dev == &cxl_root_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
		if (dev == &cxl_swu_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
		if (dev == &cxl_swd_single[i]->dev)
			return true;

	if (is_cxl_memdev(dev))
		return is_mock_dev(dev->parent);

	return false;
}

static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}

static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (handle == host_bridge[i].handle)
			return &host_bridge[i];
	return NULL;
}

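/*
 * Intercept _UID evaluation for the mock host bridges; the bridge's index
 * in @host_bridge doubles as its UID.
 */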
static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}

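/*
 * One mock PCI root per mock host bridge, sized to cover the single-port
 * bridge as well since host_bridge_index() can return any bridge index.
 */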
static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
};

static bool is_mock_bus(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return true;
	return false;
}

static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}

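/*
 * Mock ports have no component registers, so hand back a bare cxl_hdm
 * context; decoders are instantiated by mock_cxl_enumerate_decoders().
 */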
static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);

	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	return cxlhdm;
}

static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}

struct target_map_ctx {
	int *target_map;
	int index;
	int target_count;
};

static int map_targets(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct target_map_ctx *ctx = data;

	ctx->target_map[ctx->index++] = pdev->id;

	if (ctx->index > ctx->target_count) {
		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
		return -ENXIO;
	}

	return 0;
}

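/*
 * Emulate the ordering rules for HDM decoder commit/reset: decoders must
 * be committed in ascending order and reset in descending order, tracked
 * via port->commit_end.
 */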
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int mock_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	return 0;
}

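/*
 * Instantiate NR_CXL_PORT_DECODERS decoders per port: switch decoders for
 * root and switch ports (with targets mapped from the child platform
 * devices), endpoint decoders for endpoint ports.
 */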
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		cxld->hpa_range = (struct range) {
			.start = 0,
			.end = -1,
		};

		cxld->interleave_ways = min_not_zero(target_count, 1);
		cxld->interleave_granularity = SZ_4K;
		cxld->target_type = CXL_DECODER_EXPANDER;
		cxld->commit = mock_decoder_commit;
		cxld->reset = mock_decoder_reset;

		if (target_count) {
			rc = device_for_each_child(port->uport, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}

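/*
 * Register downstream ports for a mock port by walking the platform
 * device array that matches its depth (root ports at depth 1, switch
 * dports at depth 2) and parentage.
 */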
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		if (is_multi_bridge(port->uport)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		if (pdev->dev.parent != port->uport) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport)) {
			dev_err(dev, "failed to add dport: %s (%ld)\n",
				dev_name(&pdev->dev), PTR_ERR(dport));
			return PTR_ERR(dport);
		}

		dev_dbg(dev, "add dport%d: %s\n", pdev->id,
			dev_name(&pdev->dev));
	}

	return 0;
}

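/*
 * Hooks handed to the cxl mock infrastructure (see mock.h) so that the
 * stock cxl_acpi/cxl_port drivers transparently operate on this fake
 * topology.
 */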
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};

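/* Wire up a mock ACPI companion (fwnode) for a platform device */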
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}

#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

#ifndef SZ_512G
#define SZ_512G (SZ_64G * 8)
#endif

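/*
 * Build the single-root-port host bridge topology: one host bridge, one
 * root port, one switch uport, two switch dports, and one cxl_mem device
 * per dport.  Platform device ids continue after the multi-bridge ids.
 */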
static __init int cxl_single_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
		struct acpi_device *adev =
			&host_bridge[NR_CXL_HOST_BRIDGES + i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge",
					     NR_CXL_HOST_BRIDGES + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_hb_single[i] = pdev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
		struct platform_device *bridge =
			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port",
					     NR_MULTI_ROOT + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_port;
		}
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
		struct platform_device *root_port = cxl_root_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport",
					     NR_MULTI_ROOT + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_uport;
		}
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_swu_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
		struct platform_device *uport =
			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport",
					     i + NR_MEM_MULTI);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_dport;
		}
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_swd_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
		struct platform_device *dport = cxl_swd_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_mem;
		}
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem_single[i] = pdev;
	}

	return 0;

err_mem:
	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
err_dport:
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}

	return rc;
}

static void cxl_single_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}
}

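/*
 * Register the mock ops, populate the mock CEDT, and instantiate the
 * platform devices for both topologies before finally adding the
 * "cxl_acpi" device that ties them together.
 */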
static __init int cxl_test_init(void)
{
	int rc, i;

	register_cxl_mock_ops(&cxl_mock_ops);

	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
	if (!cxl_mock_pool) {
		rc = -ENOMEM;
		goto err_gen_pool_create;
	}

	rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
			  SZ_64G, NUMA_NO_NODE);
	if (rc)
		goto err_gen_pool_add;

	rc = populate_cedt();
	if (rc)
		goto err_populate;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
		struct acpi_device *adev = &host_bridge[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_host_bridge[i] = pdev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
		struct platform_device *bridge =
			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_port;
		}
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_port[i] = pdev;
	}

	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
		struct platform_device *root_port = cxl_root_port[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_uport;
		}
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_switch_uport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
		struct platform_device *uport =
			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_dport;
		}
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_switch_dport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
		struct platform_device *dport = cxl_switch_dport[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_mem;
		}
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem[i] = pdev;
	}

	rc = cxl_single_init();
	if (rc)
		goto err_mem;

	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
	if (!cxl_acpi) {
		rc = -ENOMEM;
		goto err_single;
	}

	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
	acpi0017_mock.dev.bus = &platform_bus_type;

	rc = platform_device_add(cxl_acpi);
	if (rc)
		goto err_add;

	return 0;

err_add:
	platform_device_put(cxl_acpi);
err_single:
	cxl_single_exit();
err_mem:
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
err_dport:
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
err_populate:
	depopulate_all_mock_resources();
err_gen_pool_add:
	gen_pool_destroy(cxl_mock_pool);
err_gen_pool_create:
	unregister_cxl_mock_ops(&cxl_mock_ops);
	return rc;
}

static __exit void cxl_test_exit(void)
{
	int i;

	platform_device_unregister(cxl_acpi);
	cxl_single_exit();
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}

module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);