// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure, used
 * to coordinate bus rescans when a bridge arrives and to trigger remove
 * operations when the bridge departs.
 */
static struct workqueue_struct *cxl_pmem_wq;

static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

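/* devm action callback: release this memdev's exclusive-command reservation */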
static void clear_exclusive(void *cxlds)
{
	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}

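/* devm action callback: unregister the nvdimm created at probe time */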
static void unregister_nvdimm(void *nvdimm)
{
	nvdimm_delete(nvdimm);
}

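/*
 * Probe a cxl_nvdimm device: look up the parent nvdimm-bridge, reserve
 * the mailbox commands the kernel must exclusively own, and register a
 * labeling-capable nvdimm whose config-data commands are serviced by
 * the CXL mailbox.
 */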
static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	unsigned long flags = 0, cmd_mask = 0;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nvdimm *nvdimm;
	int rc;

	cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
	if (!cxl_nvb)
		return -ENXIO;

	cxl_device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		rc = -ENXIO;
		goto out;
	}

	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
	if (rc)
		goto out;

	set_bit(NDD_LABELING, &flags);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
			       cmd_mask, 0, NULL);
	if (!nvdimm) {
		rc = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, nvdimm);
	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
	cxl_device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
};

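/*
 * ND_CMD_GET_CONFIG_SIZE: report the label storage area (LSA) size, with
 * transfers capped at the mailbox payload size.
 */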
static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_size *cmd,
				    unsigned int buf_len)
{
	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	*cmd = (struct nd_cmd_get_config_size) {
		.config_size = cxlds->lsa_size,
		.max_xfer = cxlds->payload_size,
	};

	return 0;
}

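/*
 * ND_CMD_GET_CONFIG_DATA: translate an LSA read into a CXL "Get LSA"
 * mailbox command, returning data directly into the caller's buffer.
 */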
static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_get_config_data_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_get_lsa get_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;
	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
		return -EINVAL;

	get_lsa = (struct cxl_mbox_get_lsa) {
		.offset = cmd->in_offset,
		.length = cmd->in_length,
	};

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
			       sizeof(get_lsa), cmd->out_buf, cmd->in_length);
	cmd->status = 0;

	return rc;
}

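/*
 * ND_CMD_SET_CONFIG_DATA: translate an LSA write into a CXL "Set LSA"
 * mailbox command, bounce-buffering the variable-length payload.
 */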
static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
				    struct nd_cmd_set_config_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_set_lsa *set_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	/* 4-byte status follows the input data in the payload */
	if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len)
		return -EINVAL;

	set_lsa =
		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
	if (!set_lsa)
		return -ENOMEM;

	*set_lsa = (struct cxl_mbox_set_lsa) {
		.offset = cmd->in_offset,
	};
	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
			       struct_size(set_lsa, data, cmd->in_length),
			       NULL, 0);

	/*
	 * Set "firmware" status (4 packed bytes at the end of the input
	 * payload).
	 */
	put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
	kvfree(set_lsa);

	return rc;
}

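/*
 * Dispatch a dimm-scoped command to its mailbox translation after
 * checking it against the command mask advertised at nvdimm_create().
 */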
static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
			       void *buf, unsigned int buf_len)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		return cxl_pmem_get_config_size(cxlds, buf, buf_len);
	case ND_CMD_GET_CONFIG_DATA:
		return cxl_pmem_get_config_data(cxlds, buf, buf_len);
	case ND_CMD_SET_CONFIG_DATA:
		return cxl_pmem_set_config_data(cxlds, buf, buf_len);
	default:
		return -ENOTTY;
	}
}

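/* Bus-level ->ndctl() entry point: only dimm-scoped commands are supported */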
static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate, let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}

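/*
 * Register the bridge's nvdimm_bus at most once; callers hold the bridge
 * device lock, so checking ->nvdimm_bus here is not racy.
 */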
static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
	if (cxl_nvb->nvdimm_bus)
		return true;
	cxl_nvb->nvdimm_bus =
		nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
	return cxl_nvb->nvdimm_bus != NULL;
}

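/* bus_for_each_dev() callback: unbind cxl_nvdimm devices ahead of bus teardown */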
static int cxl_nvdimm_release_driver(struct device *dev, void *data)
{
	if (!is_cxl_nvdimm(dev))
		return 0;
	device_release_driver(dev);
	return 0;
}

static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	/*
	 * Set the state of cxl_nvdimm devices to unbound / idle before
	 * nvdimm_bus_unregister() rips the nvdimm objects out from
	 * underneath them.
	 */
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
	nvdimm_bus_unregister(nvdimm_bus);
}

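/*
 * Bridge state machine worker: ONLINE registers the nvdimm_bus and
 * rescans the CXL bus for waiting cxl_nvdimm devices; OFFLINE / DEAD
 * detaches the nvdimm_bus so it can be torn down after the device lock
 * is dropped.
 */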
static void cxl_nvb_update_state(struct work_struct *work)
{
	struct cxl_nvdimm_bridge *cxl_nvb =
		container_of(work, typeof(*cxl_nvb), state_work);
	struct nvdimm_bus *victim_bus = NULL;
	bool release = false, rescan = false;

	cxl_device_lock(&cxl_nvb->dev);
	switch (cxl_nvb->state) {
	case CXL_NVB_ONLINE:
		if (!online_nvdimm_bus(cxl_nvb)) {
			dev_err(&cxl_nvb->dev,
				"failed to establish nvdimm bus\n");
			release = true;
		} else {
			rescan = true;
		}
		break;
	case CXL_NVB_OFFLINE:
	case CXL_NVB_DEAD:
		victim_bus = cxl_nvb->nvdimm_bus;
		cxl_nvb->nvdimm_bus = NULL;
		break;
	default:
		break;
	}
	cxl_device_unlock(&cxl_nvb->dev);

	if (release)
		device_release_driver(&cxl_nvb->dev);
	if (rescan) {
		int rc = bus_rescan_devices(&cxl_bus_type);

		dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
	}
	offline_nvdimm_bus(victim_bus);

	put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
	/*
	 * Take a reference for the queued work to drop; if the work was
	 * already pending, drop the reference immediately.
	 */
	get_device(&cxl_nvb->dev);
	if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_remove(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_ONLINE)
		cxl_nvb->state = CXL_NVB_OFFLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);
}

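/*
 * The first probe initializes the bus descriptor and state work; every
 * probe transitions the bridge to CXL_NVB_ONLINE and kicks the state
 * machine.
 */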
static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_DEAD)
		return -ENXIO;

	if (cxl_nvb->state == CXL_NVB_NEW) {
		cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
			.provider_name = "CXL",
			.module = THIS_MODULE,
			.ndctl = cxl_pmem_ctl,
		};

		INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
	}

	cxl_nvb->state = CXL_NVB_ONLINE;
	cxl_nvdimm_bridge_state_work(cxl_nvb);

	return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.remove = cxl_nvdimm_bridge_remove,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
};

/*
 * Return all bridges to the CXL_NVB_NEW state to invalidate any
 * ->state_work referring to the now destroyed cxl_pmem_wq.
 */
static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
{
	struct cxl_nvdimm_bridge *cxl_nvb;

	if (!is_cxl_nvdimm_bridge(dev))
		return 0;

	cxl_nvb = to_cxl_nvdimm_bridge(dev);
	cxl_device_lock(dev);
	cxl_nvb->state = CXL_NVB_NEW;
	cxl_device_unlock(dev);

	return 0;
}

static void destroy_cxl_pmem_wq(void)
{
	destroy_workqueue(cxl_pmem_wq);
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}

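/*
 * Module init: mark the commands that must stay kernel-exclusive while
 * the kernel manages labels (partition info, shutdown state, and LSA
 * writes), then register the bridge and nvdimm drivers.
 */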
static __init int cxl_pmem_init(void)
{
	int rc;

	set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
	if (!cxl_pmem_wq)
		return -ENXIO;

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		goto err_bridge;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	return 0;

err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
	destroy_cxl_pmem_wq();
	return rc;
}

static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	destroy_cxl_pmem_wq();
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);