xref: /openbmc/linux/drivers/cxl/pmem.c (revision 901181b7)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure, used
 * to coordinate bus rescans when a bridge arrives and to trigger
 * remove operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;

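/*
 * Commands that mutate the label storage area (LSA) are marked
 * exclusive while a cxl_nvdimm device is bound, so that the raw ioctl
 * path cannot modify label data behind the nvdimm core's back.
 */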
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

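/* devm action: release the exclusive claim on the label commands */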
static void clear_exclusive(void *cxlm)
{
	clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
}

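/* devm action: unregister the nvdimm when the cxl_nvdimm device goes away */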
static void unregister_nvdimm(void *nvdimm)
{
	nvdimm_delete(nvdimm);
}

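/*
 * Bind a cxl_nvdimm device to LIBNVDIMM: require an online nvdimm
 * bridge, claim exclusive ownership of the label commands, and
 * register an nvdimm that advertises labeling support.
 */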
static int cxl_nvdimm_probe(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	unsigned long flags = 0, cmd_mask = 0;
	struct cxl_mem *cxlm = cxlmd->cxlm;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct nvdimm *nvdimm;
	int rc;

	cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
	if (!cxl_nvb)
		return -ENXIO;

	device_lock(&cxl_nvb->dev);
	if (!cxl_nvb->nvdimm_bus) {
		rc = -ENXIO;
		goto out;
	}

	set_exclusive_cxl_commands(cxlm, exclusive_cmds);
	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
	if (rc)
		goto out;

	set_bit(NDD_LABELING, &flags);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
			       cmd_mask, 0, NULL);
	if (!nvdimm) {
		rc = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, nvdimm);
	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
	device_unlock(&cxl_nvb->dev);
	put_device(&cxl_nvb->dev);

	return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
};

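/* ND_CMD_GET_CONFIG_SIZE: report the LSA size and mailbox transfer limit */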
static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
				    struct nd_cmd_get_config_size *cmd,
				    unsigned int buf_len)
{
	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	*cmd = (struct nd_cmd_get_config_size) {
		 .config_size = cxlm->lsa_size,
		 .max_xfer = cxlm->payload_size,
	};

	return 0;
}

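/* ND_CMD_GET_CONFIG_DATA: read label data via CXL_MBOX_OP_GET_LSA */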
static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
				    struct nd_cmd_get_config_data_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_get_lsa get_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;
	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
		return -EINVAL;

	get_lsa = (struct cxl_mbox_get_lsa) {
		.offset = cmd->in_offset,
		.length = cmd->in_length,
	};

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
				   sizeof(get_lsa), cmd->out_buf,
				   cmd->in_length);
	cmd->status = 0;

	return rc;
}

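/* ND_CMD_SET_CONFIG_DATA: write label data via CXL_MBOX_OP_SET_LSA */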
static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
				    struct nd_cmd_set_config_hdr *cmd,
				    unsigned int buf_len)
{
	struct cxl_mbox_set_lsa *set_lsa;
	int rc;

	if (sizeof(*cmd) > buf_len)
		return -EINVAL;

	/* 4-byte status follows the input data in the payload */
	if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len)
		return -EINVAL;

	set_lsa =
		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
	if (!set_lsa)
		return -ENOMEM;

	*set_lsa = (struct cxl_mbox_set_lsa) {
		.offset = cmd->in_offset,
	};
	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
				   struct_size(set_lsa, data, cmd->in_length),
				   NULL, 0);

	/*
	 * Set "firmware" status (4 packed bytes at the end of the input
	 * payload).
	 */
	put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
	kvfree(set_lsa);

	return rc;
}

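/*
 * Dispatch LIBNVDIMM commands that passed the cmd_mask filter to
 * their CXL mailbox handlers.
 */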
static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
			       void *buf, unsigned int buf_len)
{
	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
	struct cxl_mem *cxlm = cxlmd->cxlm;

	if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		return cxl_pmem_get_config_size(cxlm, buf, buf_len);
	case ND_CMD_GET_CONFIG_DATA:
		return cxl_pmem_get_config_data(cxlm, buf, buf_len);
	case ND_CMD_SET_CONFIG_DATA:
		return cxl_pmem_set_config_data(cxlm, buf, buf_len);
	default:
		return -ENOTTY;
	}
}

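/*
 * nvdimm_bus_descriptor::ndctl entry point: ND_CMD ioctls issued on
 * the nmemX device (e.g. by ndctl reading labels) land here from the
 * LIBNVDIMM core.
 */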
static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len, int *cmd_rc)
{
	/*
	 * No firmware response to translate; let the transport error
	 * code take precedence.
	 */
	*cmd_rc = 0;

	if (!nvdimm)
		return -ENOTTY;
	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}

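/*
 * Register the nvdimm_bus on the first ONLINE transition; subsequent
 * calls are idempotent while the bus exists.
 */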
static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
	if (cxl_nvb->nvdimm_bus)
		return true;
	cxl_nvb->nvdimm_bus =
		nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
	return cxl_nvb->nvdimm_bus != NULL;
}

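/* bus_for_each_dev() callback: unbind any bound cxl_nvdimm device */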
static int cxl_nvdimm_release_driver(struct device *dev, void *data)
{
	if (!is_cxl_nvdimm(dev))
		return 0;
	device_release_driver(dev);
	return 0;
}

static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	/*
	 * Set the state of cxl_nvdimm devices to unbound / idle before
	 * nvdimm_bus_unregister() rips the nvdimm objects out from
	 * underneath them.
	 */
	bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
	nvdimm_bus_unregister(nvdimm_bus);
}

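/*
 * Bridge state machine, run from the ordered cxl_pmem_wq: bring up
 * the nvdimm_bus when the bridge goes ONLINE (then rescan so waiting
 * cxl_nvdimm devices can attach), and tear it down on OFFLINE / DEAD.
 * Drops the device reference taken when the work was queued.
 */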
static void cxl_nvb_update_state(struct work_struct *work)
{
	struct cxl_nvdimm_bridge *cxl_nvb =
		container_of(work, typeof(*cxl_nvb), state_work);
	struct nvdimm_bus *victim_bus = NULL;
	bool release = false, rescan = false;

	device_lock(&cxl_nvb->dev);
	switch (cxl_nvb->state) {
	case CXL_NVB_ONLINE:
		if (!online_nvdimm_bus(cxl_nvb)) {
			dev_err(&cxl_nvb->dev,
				"failed to establish nvdimm bus\n");
			release = true;
		} else
			rescan = true;
		break;
	case CXL_NVB_OFFLINE:
	case CXL_NVB_DEAD:
		victim_bus = cxl_nvb->nvdimm_bus;
		cxl_nvb->nvdimm_bus = NULL;
		break;
	default:
		break;
	}
	device_unlock(&cxl_nvb->dev);

	if (release)
		device_release_driver(&cxl_nvb->dev);
	if (rescan) {
		int rc = bus_rescan_devices(&cxl_bus_type);

		dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
	}
	offline_nvdimm_bus(victim_bus);

	put_device(&cxl_nvb->dev);
}

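/* Mark the bridge OFFLINE and let the state worker unwind the nvdimm_bus */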
static void cxl_nvdimm_bridge_remove(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_ONLINE)
		cxl_nvb->state = CXL_NVB_OFFLINE;
	if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		get_device(&cxl_nvb->dev);
}

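/*
 * First-time probe initializes the nvdimm_bus_descriptor and the
 * state worker; every probe then requests an ONLINE transition. A
 * DEAD bridge refuses to rebind.
 */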
static int cxl_nvdimm_bridge_probe(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	if (cxl_nvb->state == CXL_NVB_DEAD)
		return -ENXIO;

	if (cxl_nvb->state == CXL_NVB_NEW) {
		cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
			.provider_name = "CXL",
			.module = THIS_MODULE,
			.ndctl = cxl_pmem_ctl,
		};

		INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
	}

	cxl_nvb->state = CXL_NVB_ONLINE;
	if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
		get_device(&cxl_nvb->dev);

	return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.remove = cxl_nvdimm_bridge_remove,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
};

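/*
 * Mark the label-mutating commands exclusive before any memdev can
 * bind, then register the bridge and nvdimm drivers.
 */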
static __init int cxl_pmem_init(void)
{
	int rc;

	set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
	if (!cxl_pmem_wq)
		return -ENXIO;

	rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
	if (rc)
		goto err_bridge;

	rc = cxl_driver_register(&cxl_nvdimm_driver);
	if (rc)
		goto err_nvdimm;

	return 0;

err_nvdimm:
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
	destroy_workqueue(cxl_pmem_wq);
	return rc;
}

static __exit void cxl_pmem_exit(void)
{
	cxl_driver_unregister(&cxl_nvdimm_driver);
	cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
	destroy_workqueue(cxl_pmem_wq);
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);