xref: /openbmc/linux/drivers/cxl/core/pmem.c (revision 25b892b5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 #include <linux/device.h>
4 #include <linux/slab.h>
5 #include <cxlmem.h>
6 #include <cxl.h>
7 #include "core.h"
8 
9 /**
10  * DOC: cxl pmem
11  *
12  * The core CXL PMEM infrastructure supports persistent memory
13  * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
14  * 'bridge' device is added at the root of a CXL device topology if
15  * platform firmware advertises at least one persistent memory capable
16  * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
17  * device. Then for each cxl_memdev in the CXL device topology a bridge
18  * device is added to host a LIBNVDIMM dimm object. When these bridges
19  * are registered native LIBNVDIMM uapis are translated to CXL
20  * operations, for example, namespace label access commands.
21  */
22 
/* Final put_device() callback: free the bridge from cxl_nvdimm_bridge_alloc() */
static void cxl_nvdimm_bridge_release(struct device *dev)
{
	kfree(to_cxl_nvdimm_bridge(dev));
}
29 
/*
 * Sysfs attribute groups for bridge devices; cxl_base_attribute_group is the
 * shared base group provided by the core (see "core.h").
 */
static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
34 
/*
 * Device type for nvdimm-bridge devices; identifies bridge objects for
 * to_cxl_nvdimm_bridge() and routes final put_device() to the release handler.
 */
const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};
40 
41 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
42 {
43 	if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
44 			  "not a cxl_nvdimm_bridge device\n"))
45 		return NULL;
46 	return container_of(dev, struct cxl_nvdimm_bridge, dev);
47 }
48 EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
49 
50 static struct cxl_nvdimm_bridge *
51 cxl_nvdimm_bridge_alloc(struct cxl_port *port)
52 {
53 	struct cxl_nvdimm_bridge *cxl_nvb;
54 	struct device *dev;
55 
56 	cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
57 	if (!cxl_nvb)
58 		return ERR_PTR(-ENOMEM);
59 
60 	dev = &cxl_nvb->dev;
61 	cxl_nvb->port = port;
62 	cxl_nvb->state = CXL_NVB_NEW;
63 	device_initialize(dev);
64 	device_set_pm_not_required(dev);
65 	dev->parent = &port->dev;
66 	dev->bus = &cxl_bus_type;
67 	dev->type = &cxl_nvdimm_bridge_type;
68 
69 	return cxl_nvb;
70 }
71 
/*
 * devm action: mark the bridge dead, flush any deferred state work, and
 * tear down the device. Runs when @host (the devm owner) is unbound.
 */
static void unregister_nvb(void *_cxl_nvb)
{
	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
	bool flush;

	/*
	 * If the bridge was ever activated then there might be in-flight state
	 * work to flush. Once the state has been changed to 'dead' then no new
	 * work can be queued by user-triggered bind.
	 */
	device_lock(&cxl_nvb->dev);
	flush = cxl_nvb->state != CXL_NVB_NEW;
	cxl_nvb->state = CXL_NVB_DEAD;
	device_unlock(&cxl_nvb->dev);

	/*
	 * Even though the device core will trigger device_release_driver()
	 * before the unregister, it does not know about the fact that
	 * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
	 * release now and flush it before tearing down the nvdimm device
	 * hierarchy.
	 */
	device_release_driver(&cxl_nvb->dev);
	if (flush)
		flush_work(&cxl_nvb->state_work);
	device_unregister(&cxl_nvb->dev);
}
99 
/**
 * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
 * @host: platform firmware root device
 * @port: CXL port at the root of a CXL topology
 *
 * Return: bridge device that can host cxl_nvdimm objects
 */
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
						     struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	/* No point in a bridge if the cxl_pmem driver can never bind to it */
	if (!IS_ENABLED(CONFIG_CXL_PMEM))
		return ERR_PTR(-ENXIO);

	cxl_nvb = cxl_nvdimm_bridge_alloc(port);
	if (IS_ERR(cxl_nvb))
		return cxl_nvb;

	dev = &cxl_nvb->dev;
	rc = dev_set_name(dev, "nvdimm-bridge");
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	/*
	 * No put_device() on this failure path: the reset action runs
	 * unregister_nvb(), whose device_unregister() drops the reference
	 * taken in cxl_nvdimm_bridge_alloc().
	 */
	rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
	if (rc)
		return ERR_PTR(rc);

	return cxl_nvb;

err:
	/* Drops the device_initialize() reference and frees via ->release() */
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
141 
/* Final put_device() callback: free the cxl_nvdimm from cxl_nvdimm_alloc() */
static void cxl_nvdimm_release(struct device *dev)
{
	kfree(to_cxl_nvdimm(dev));
}
148 
/* Sysfs attribute groups for cxl_nvdimm devices (shared core base group) */
static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
153 
/*
 * Device type for per-memdev nvdimm bridge objects; is_cxl_nvdimm() keys
 * off this, and ->release frees the object on the final put_device().
 */
const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};
159 
/* Test whether @dev is a cxl_nvdimm device created by cxl_nvdimm_alloc() */
bool is_cxl_nvdimm(struct device *dev)
{
	return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
165 
166 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
167 {
168 	if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
169 			  "not a cxl_nvdimm device\n"))
170 		return NULL;
171 	return container_of(dev, struct cxl_nvdimm, dev);
172 }
173 EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
174 
175 static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
176 {
177 	struct cxl_nvdimm *cxl_nvd;
178 	struct device *dev;
179 
180 	cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
181 	if (!cxl_nvd)
182 		return ERR_PTR(-ENOMEM);
183 
184 	dev = &cxl_nvd->dev;
185 	cxl_nvd->cxlmd = cxlmd;
186 	device_initialize(dev);
187 	device_set_pm_not_required(dev);
188 	dev->parent = &cxlmd->dev;
189 	dev->bus = &cxl_bus_type;
190 	dev->type = &cxl_nvdimm_type;
191 
192 	return cxl_nvd;
193 }
194 
/**
 * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
 * @host: same host as @cxlmd
 * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
 *
 * Return: 0 on success negative error code on failure.
 */
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;
	int rc;

	cxl_nvd = cxl_nvdimm_alloc(cxlmd);
	if (IS_ERR(cxl_nvd))
		return PTR_ERR(cxl_nvd);

	dev = &cxl_nvd->dev;
	/* Device name mirrors the memdev id, e.g. "pmem0" for "mem0" */
	rc = dev_set_name(dev, "pmem%d", cxlmd->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	/*
	 * Lifetime is tied to @host; the devm action unregisters the device
	 * and drops the reference taken in cxl_nvdimm_alloc().
	 */
	return devm_add_action_or_reset(host, unregister_cxl_dev, dev);

err:
	/* Drops the device_initialize() reference and frees via ->release() */
	put_device(dev);
	return rc;
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
231