// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure
 * to coordinate bus rescans when a bridge arrives and trigger remove
 * operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;

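/*
 * Mailbox commands reserved for exclusive kernel use (and thus denied to
 * the userspace raw command path) while a cxl_nvdimm is bound. See
 * cxl_pmem_init() for the commands that populate this mask.
 */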
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

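/* devm action: drop the exclusive command reservation taken at probe time */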
static void clear_exclusive(void *cxlds)
{
	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}

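/*
 * devm action: unbind any pmem regions still referencing this nvdimm,
 * dropping the bridge lock around each unbind, then delete the nvdimm
 * itself and sever its link to the bridge.
 */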
static void unregister_nvdimm(void *nvdimm)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
	struct cxl_pmem_region *cxlr_pmem;
	unsigned long index;

	device_lock(&cxl_nvb->dev);
	dev_set_drvdata(&cxl_nvd->dev, NULL);
	xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) {
		get_device(&cxlr_pmem->dev);
		device_unlock(&cxl_nvb->dev);

		device_release_driver(&cxlr_pmem->dev);
		put_device(&cxlr_pmem->dev);

		device_lock(&cxl_nvb->dev);
	}
	device_unlock(&cxl_nvb->dev);

	nvdimm_delete(nvdimm);
	cxl_nvd->bridge = NULL;
}

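/*
 * Bind a cxl_nvdimm to libnvdimm: locate the bridge, reserve the
 * exclusive_cmds set for kernel use, and register an nvdimm that
 * advertises labeling plus the GET/SET_CONFIG command set.
 */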
static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	unsigned long flags = 0, cmd_mask = 0;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nvdimm *nvdimm;
	int rc;

	cxl_nvb = cxl_find_nvdimm_bridge(dev);
	if (!cxl_nvb)
		return -ENXIO;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		rc = -ENXIO;
		goto out;
	}

	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
	if (rc)
		goto out;

	set_bit(NDD_LABELING, &flags);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
			       cmd_mask, 0, NULL);
	if (!nvdimm) {
		rc = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, nvdimm);
	cxl_nvd->bridge = cxl_nvb;
	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
};

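/*
 * ND_CMD_GET_CONFIG_SIZE: report the size of the label storage area (LSA)
 * and the largest transfer that fits in a single Set LSA mailbox payload.
 */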
static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_size *cmd,
				    unsigned int buf_len)
{
	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	*cmd = (struct nd_cmd_get_config_size) {
		 .config_size = cxlds->lsa_size,
		 .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
	};

	return 0;
}

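/*
 * ND_CMD_GET_CONFIG_DATA: translate a label area read into a Get LSA
 * mailbox command, returning the payload directly in the caller's buffer.
 */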
static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_data_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_get_lsa get_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;
	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
		return -EINVAL;

	get_lsa = (struct cxl_mbox_get_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
		.length = cpu_to_le32(cmd->in_length),
	};

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
			       sizeof(get_lsa), cmd->out_buf, cmd->in_length);
	cmd->status = 0;

	return rc;
}

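/*
 * ND_CMD_SET_CONFIG_DATA: bounce the incoming label data into a Set LSA
 * mailbox payload and fill in the trailing "firmware" status word that the
 * ND command format expects.
 */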
static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_set_config_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_set_lsa *set_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	/* 4-byte status follows the input data in the payload */
	if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
		return -EINVAL;

	set_lsa =
		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
	if (!set_lsa)
		return -ENOMEM;

	*set_lsa = (struct cxl_mbox_set_lsa) {
		.offset = cpu_to_le32(cmd->in_offset),
	};
	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
			       struct_size(set_lsa, data, cmd->in_length),
			       NULL, 0);

	/*
	 * Set the "firmware" status (4 packed bytes at the end of the input
	 * payload).
	 */
	put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
	kvfree(set_lsa);

	return rc;
}

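/*
 * Dispatch the ND commands advertised in the nvdimm's command mask to
 * their CXL mailbox translations.
 */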
static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
			       void *buf, unsigned int buf_len)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		return cxl_pmem_get_config_size(cxlds, buf, buf_len);
	case ND_CMD_GET_CONFIG_DATA:
		return cxl_pmem_get_config_data(cxlds, buf, buf_len);
	case ND_CMD_SET_CONFIG_DATA:
		return cxl_pmem_set_config_data(cxlds, buf, buf_len);
	default:
		return -ENOTTY;
	}
}

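/*
 * nvdimm_bus_descriptor::ndctl entry point; only dimm-scoped commands are
 * supported, bus-scoped commands are rejected.
 */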
static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate, let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}

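/*
 * Idempotently register the bridge's nvdimm_bus. Returns true if a bus is
 * available after the call.
 */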
static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
	if (cxl_nvb->nvdimm_bus)
		return true;
	cxl_nvb->nvdimm_bus =
		nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
	return cxl_nvb->nvdimm_bus != NULL;
}

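/*
 * bus_for_each_dev() callbacks to unbind cxl_nvdimm and cxl_pmem_region
 * devices associated with a given bridge ahead of nvdimm_bus_unregister().
 */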
static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
	struct cxl_nvdimm *cxl_nvd;

	if (!is_cxl_nvdimm(dev))
		return 0;

	cxl_nvd = to_cxl_nvdimm(dev);
	if (cxl_nvd->bridge != cxl_nvb)
		return 0;

	device_release_driver(dev);
	return 0;
}

static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
{
	struct cxl_pmem_region *cxlr_pmem;

	if (!is_cxl_pmem_region(dev))
		return 0;

	cxlr_pmem = to_cxl_pmem_region(dev);
	if (cxlr_pmem->bridge != cxl_nvb)
		return 0;

	device_release_driver(dev);
	return 0;
}

static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
			       struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	/*
	 * Set the state of cxl_nvdimm devices to unbound / idle before
	 * nvdimm_bus_unregister() rips the nvdimm objects out from
	 * underneath them.
	 */
	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
			 cxl_pmem_region_release_driver);
	bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
			 cxl_nvdimm_release_driver);
	nvdimm_bus_unregister(nvdimm_bus);
}

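/*
 * Bridge state machine worker: bring up the nvdimm_bus when the bridge
 * goes ONLINE and rescan the CXL bus so waiting consumers can attach, or
 * tear the bus down when the bridge goes OFFLINE / DEAD.
 */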
static void cxl_nvb_update_state(struct work_struct *work)
{
	struct cxl_nvdimm_bridge *cxl_nvb =
		container_of(work, typeof(*cxl_nvb), state_work);
	struct nvdimm_bus *victim_bus = NULL;
	bool release = false, rescan = false;

	device_lock(&cxl_nvb->dev);
	switch (cxl_nvb->state) {
	case CXL_NVB_ONLINE:
		if (!online_nvdimm_bus(cxl_nvb)) {
			dev_err(&cxl_nvb->dev,
				"failed to establish nvdimm bus\n");
			release = true;
		} else
			rescan = true;
		break;
	case CXL_NVB_OFFLINE:
	case CXL_NVB_DEAD:
		victim_bus = cxl_nvb->nvdimm_bus;
		cxl_nvb->nvdimm_bus = NULL;
		break;
	default:
		break;
	}
	device_unlock(&cxl_nvb->dev);

	if (release)
		device_release_driver(&cxl_nvb->dev);
	if (rescan) {
		int rc = bus_rescan_devices(&cxl_bus_type);

		dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
	}
	offline_nvdimm_bus(cxl_nvb, victim_bus);

	put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
	/*
	 * Take a reference that the workqueue will drop if new work
	 * gets queued.
	 */
	get_device(&cxl_nvb->dev);
	if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		put_device(&cxl_nvb->dev);
}

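/* Bridge ->remove(): queue the transition from ONLINE to OFFLINE */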
static void cxl_nvdimm_bridge_remove(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_ONLINE)
		cxl_nvb->state = CXL_NVB_OFFLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);
}

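/*
 * Bridge ->probe(): initialize the nvdimm_bus_descriptor and state work on
 * first use, then queue the transition to ONLINE.
 */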
static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_DEAD)
		return -ENXIO;

	if (cxl_nvb->state == CXL_NVB_NEW) {
		cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
			.provider_name = "CXL",
			.module = THIS_MODULE,
			.ndctl = cxl_pmem_ctl,
		};

		INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
	}

	cxl_nvb->state = CXL_NVB_ONLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);

	return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.remove = cxl_nvdimm_bridge_remove,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
};

static int match_cxl_nvdimm(struct device *dev, void *data)
{
	return is_cxl_nvdimm(dev);
}

static void unregister_nvdimm_region(void *nd_region)
{
	nvdimm_region_delete(nd_region);
}

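/*
 * Track that @cxlr_pmem depends on @cxl_nvd and pin the region so that
 * unregister_nvdimm() can unbind it if the nvdimm departs first.
 */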
static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd,
				 struct cxl_pmem_region *cxlr_pmem)
{
	int rc;

	rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem,
		       cxlr_pmem, GFP_KERNEL);
	if (rc)
		return rc;

	get_device(&cxlr_pmem->dev);
	return 0;
}

static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd,
				  struct cxl_pmem_region *cxlr_pmem)
{
	/*
	 * It is possible this is called without a corresponding
	 * cxl_nvdimm_add_region() for @cxlr_pmem.
	 */
	cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem);
	if (cxlr_pmem)
		put_device(&cxlr_pmem->dev);
}

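/*
 * devm action: drop the per-nvdimm region pins taken by
 * cxl_nvdimm_add_region() during cxl_pmem_region_probe().
 */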
static void release_mappings(void *data)
{
	int i;
	struct cxl_pmem_region *cxlr_pmem = data;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge;

	device_lock(&cxl_nvb->dev);
	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;

		cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem);
	}
	device_unlock(&cxl_nvb->dev);
}

static void cxlr_pmem_remove_resource(void *res)
{
	remove_resource(res);
}

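/* Per-mapping (offset, serial) tuple folded into the interleave-set cookie */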
struct cxl_pmem_region_info {
	u64 offset;
	u64 serial;
};

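/*
 * Assemble a libnvdimm pmem region from a CXL pmem region: claim the HPA
 * range in the iomem resource tree, build an nd_mapping_desc for each
 * participating memdev, derive an interleave-set cookie from the mapping
 * offsets and device serial numbers, and register the resulting nd_region
 * on the bridge's nvdimm_bus.
 */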
static int cxl_pmem_region_probe(struct device *dev)
{
	struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_pmem_region_info *info = NULL;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nd_interleave_set *nd_set;
	struct nd_region_desc ndr_desc;
	struct cxl_nvdimm *cxl_nvd;
	struct nvdimm *nvdimm;
	struct resource *res;
	int rc, i = 0;

	cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
	if (!cxl_nvb) {
		dev_dbg(dev, "bridge not found\n");
		return -ENXIO;
	}
	cxlr_pmem->bridge = cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		dev_dbg(dev, "nvdimm bus not found\n");
		rc = -ENXIO;
		goto out_nvb;
	}

	memset(&mappings, 0, sizeof(mappings));
	memset(&ndr_desc, 0, sizeof(ndr_desc));

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	res->name = "Persistent Memory";
	res->start = cxlr_pmem->hpa_range.start;
	res->end = cxlr_pmem->hpa_range.end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	rc = insert_resource(&iomem_resource, res);
	if (rc)
		goto out_nvb;

	rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
	if (rc)
		goto out_nvb;

	ndr_desc.res = res;
	ndr_desc.provider_data = cxlr_pmem;

	ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
	ndr_desc.target_node = phys_to_target_node(res->start);
	if (ndr_desc.target_node == NUMA_NO_NODE) {
		ndr_desc.target_node = ndr_desc.numa_node;
		dev_dbg(&cxlr->dev, "changing target node from %d to %d",
			NUMA_NO_NODE, ndr_desc.target_node);
	}

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	ndr_desc.memregion = cxlr->id;
	set_bit(ND_REGION_CXL, &ndr_desc.flags);
	set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

	info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto out_nvb;
	}

	rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem);
	if (rc)
		goto out_nvd;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_memdev *cxlmd = m->cxlmd;
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct device *d;

		d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
		if (!d) {
			dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		/* safe to drop ref now with bridge lock held */
		put_device(d);

		cxl_nvd = to_cxl_nvdimm(d);
		nvdimm = dev_get_drvdata(&cxl_nvd->dev);
		if (!nvdimm) {
			dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		/*
		 * Pin the region per nvdimm device as those may be released
		 * out-of-order with respect to the region, and a single nvdimm
		 * may be associated with multiple regions.
		 */
		rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem);
		if (rc)
			goto out_nvd;
		m->cxl_nvd = cxl_nvd;
		mappings[i] = (struct nd_mapping_desc) {
			.nvdimm = nvdimm,
			.start = m->start,
			.size = m->size,
			.position = i,
		};
		info[i].offset = m->start;
		info[i].serial = cxlds->serial;
	}
	ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
	ndr_desc.mapping = mappings;

	/*
	 * TODO enable CXL labels which skip the need for 'interleave-set cookie'
	 */
	nd_set->cookie1 =
		nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
	nd_set->cookie2 = nd_set->cookie1;
	ndr_desc.nd_set = nd_set;

	cxlr_pmem->nd_region =
		nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
	if (!cxlr_pmem->nd_region) {
		rc = -ENOMEM;
		goto out_nvd;
	}

	rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
				      cxlr_pmem->nd_region);
out_nvd:
	kfree(info);
out_nvb:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_pmem_region_driver = {
	.name = "cxl_pmem_region",
	.probe = cxl_pmem_region_probe,
	.id = CXL_DEVICE_PMEM_REGION,
};

/*
 * Return all bridges to the CXL_NVB_NEW state to invalidate any
 * ->state_work referring to the now destroyed cxl_pmem_wq.
 */
static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
{
	struct cxl_nvdimm_bridge *cxl_nvb;

	if (!is_cxl_nvdimm_bridge(dev))
		return 0;

	cxl_nvb = to_cxl_nvdimm_bridge(dev);
	device_lock(dev);
	cxl_nvb->state = CXL_NVB_NEW;
	device_unlock(dev);

	return 0;
}

static void destroy_cxl_pmem_wq(void)
{
	destroy_workqueue(cxl_pmem_wq);
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}

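/*
 * Module init: reserve the Set Shutdown State and Set LSA commands for
 * kernel use, create the ordered workqueue, and register the bridge,
 * nvdimm, and pmem region drivers, unwinding in reverse order on failure.
 */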
static __init int cxl_pmem_init(void)
{
	int rc;

	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
	if (!cxl_pmem_wq)
		return -ENXIO;

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		goto err_bridge;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	rc = cxl_driver_register(&cxl_pmem_region_driver);
	if (rc)
		goto err_region;

	return 0;

err_region:
	cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
	destroy_cxl_pmem_wq();
	return rc;
}

static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_pmem_region_driver);
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	destroy_cxl_pmem_wq();
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);