xref: /openbmc/linux/drivers/cxl/core/memdev.c (revision 48dcdbb1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 
4 #include <linux/io-64-nonatomic-lo-hi.h>
5 #include <linux/device.h>
6 #include <linux/slab.h>
7 #include <linux/idr.h>
8 #include <linux/pci.h>
9 #include <cxlmem.h>
10 #include "trace.h"
11 #include "core.h"
12 
13 static DECLARE_RWSEM(cxl_memdev_rwsem);
14 
15 /*
16  * An entire PCI topology full of devices (256 buses * 32 devices * 8
17  * functions) should be enough for any config
18  */
19 #define CXL_MEM_MAX_DEVS 65536
20 
21 static int cxl_mem_major;
22 static DEFINE_IDA(cxl_memdev_ida);
23 
24 static void cxl_memdev_release(struct device *dev)
25 {
26 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
27 
28 	ida_free(&cxl_memdev_ida, cxlmd->id);
29 	kfree(cxlmd);
30 }
31 
32 static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
33 				kgid_t *gid)
34 {
35 	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
36 }
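/*
 * Example (a sketch, assuming a device that enumerated as mem0): with the
 * devnode callback above, the character device appears in devtmpfs as
 * /dev/cxl/mem0 rather than directly under /dev.
 */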
37 
38 static ssize_t firmware_version_show(struct device *dev,
39 				     struct device_attribute *attr, char *buf)
40 {
41 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
42 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
43 
44 	return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
45 }
46 static DEVICE_ATTR_RO(firmware_version);
47 
48 static ssize_t payload_max_show(struct device *dev,
49 				struct device_attribute *attr, char *buf)
50 {
51 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
52 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
53 
54 	return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
55 }
56 static DEVICE_ATTR_RO(payload_max);
57 
58 static ssize_t label_storage_size_show(struct device *dev,
59 				       struct device_attribute *attr, char *buf)
60 {
61 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
62 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
63 
64 	return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
65 }
66 static DEVICE_ATTR_RO(label_storage_size);
67 
68 static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
69 			     char *buf)
70 {
71 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
72 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
73 	unsigned long long len = resource_size(&cxlds->ram_res);
74 
75 	return sysfs_emit(buf, "%#llx\n", len);
76 }
77 
78 static struct device_attribute dev_attr_ram_size =
79 	__ATTR(size, 0444, ram_size_show, NULL);
80 
81 static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
82 			      char *buf)
83 {
84 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
85 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
86 	unsigned long long len = resource_size(&cxlds->pmem_res);
87 
88 	return sysfs_emit(buf, "%#llx\n", len);
89 }
90 
91 static struct device_attribute dev_attr_pmem_size =
92 	__ATTR(size, 0444, pmem_size_show, NULL);
93 
94 static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
95 			   char *buf)
96 {
97 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
98 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
99 
100 	return sysfs_emit(buf, "%#llx\n", cxlds->serial);
101 }
102 static DEVICE_ATTR_RO(serial);
103 
104 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
105 			      char *buf)
106 {
107 	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
108 }
109 static DEVICE_ATTR_RO(numa_node);
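/*
 * The attributes above (and the ram/pmem size attributes) land under the
 * memdev's sysfs directory. A minimal sketch from userspace, assuming a
 * device named mem0:
 *
 *	cat /sys/bus/cxl/devices/mem0/firmware_version
 *	cat /sys/bus/cxl/devices/mem0/payload_max
 *	cat /sys/bus/cxl/devices/mem0/ram/size
 *	cat /sys/bus/cxl/devices/mem0/pmem/size
 */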
110 
111 static ssize_t security_state_show(struct device *dev,
112 				   struct device_attribute *attr,
113 				   char *buf)
114 {
115 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
116 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
117 	unsigned long state = cxlds->security.state;
118 	u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
119 	u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
120 	u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
121 
122 	if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
123 		return sysfs_emit(buf, "sanitize\n");
124 
125 	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
126 		return sysfs_emit(buf, "disabled\n");
127 	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
128 	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
129 	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
130 		return sysfs_emit(buf, "frozen\n");
131 	if (state & CXL_PMEM_SEC_STATE_LOCKED)
132 		return sysfs_emit(buf, "locked\n");
133 	else
134 		return sysfs_emit(buf, "unlocked\n");
135 }
136 static struct device_attribute dev_attr_security_state =
137 	__ATTR(state, 0444, security_state_show, NULL);
138 
139 static ssize_t security_sanitize_store(struct device *dev,
140 				       struct device_attribute *attr,
141 				       const char *buf, size_t len)
142 {
143 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
144 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
145 	struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);
146 	ssize_t rc;
147 	bool sanitize;
148 
149 	if (kstrtobool(buf, &sanitize) || !sanitize)
150 		return -EINVAL;
151 
152 	if (!port || !is_cxl_endpoint(port))
153 		return -EINVAL;
154 
155 	/* ensure no regions are mapped to this memdev */
156 	if (port->commit_end != -1)
157 		return -EBUSY;
158 
159 	rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SANITIZE);
160 
161 	return rc ? rc : len;
162 }
163 static struct device_attribute dev_attr_security_sanitize =
164 	__ATTR(sanitize, 0200, NULL, security_sanitize_store);
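/*
 * Sketch of the security interface from userspace, assuming a device named
 * mem0 with no committed regions:
 *
 *	cat /sys/bus/cxl/devices/mem0/security/state	# e.g. "disabled"
 *	echo 1 > /sys/bus/cxl/devices/mem0/security/sanitize
 *
 * While the background sanitize command is in flight, "state" reports
 * "sanitize". cxl_memdev_security_init() below caches the kernfs node for
 * "state" so the driver can notify pollers when the operation completes.
 */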
165 
166 static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
167 {
168 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
169 	u64 offset, length;
170 	int rc = 0;
171 
172 	/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
173 	if (resource_size(&cxlds->pmem_res)) {
174 		offset = cxlds->pmem_res.start;
175 		length = resource_size(&cxlds->pmem_res);
176 		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
177 		if (rc)
178 			return rc;
179 	}
180 	if (resource_size(&cxlds->ram_res)) {
181 		offset = cxlds->ram_res.start;
182 		length = resource_size(&cxlds->ram_res);
183 		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
184 		/*
185 		 * Invalid Physical Address is not an error for
186 		 * volatile addresses. Device support is optional.
187 		 */
188 		if (rc == -EFAULT)
189 			rc = 0;
190 	}
191 	return rc;
192 }
193 
194 int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
195 {
196 	struct cxl_port *port;
197 	int rc;
198 
199 	port = dev_get_drvdata(&cxlmd->dev);
200 	if (!port || !is_cxl_endpoint(port))
201 		return -EINVAL;
202 
203 	rc = down_read_interruptible(&cxl_dpa_rwsem);
204 	if (rc)
205 		return rc;
206 
207 	if (port->commit_end == -1) {
208 		/* No regions mapped to this memdev */
209 		rc = cxl_get_poison_by_memdev(cxlmd);
210 	} else {
211 		/* Regions mapped, collect poison by endpoint */
212 		rc = cxl_get_poison_by_endpoint(port);
213 	}
214 	up_read(&cxl_dpa_rwsem);
215 
216 	return rc;
217 }
218 EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
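/*
 * cxl_trigger_poison_list() is intended to back a debugfs knob in the
 * cxl_mem driver. A sketch, assuming a device named mem0 and debugfs
 * mounted at /sys/kernel/debug:
 *
 *	echo 1 > /sys/kernel/debug/cxl/mem0/trigger_poison_list
 *
 * The retrieved poison records are not returned to the caller; they are
 * emitted as cxl_poison trace events.
 */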
219 
220 struct cxl_dpa_to_region_context {
221 	struct cxl_region *cxlr;
222 	u64 dpa;
223 };
224 
225 static int __cxl_dpa_to_region(struct device *dev, void *arg)
226 {
227 	struct cxl_dpa_to_region_context *ctx = arg;
228 	struct cxl_endpoint_decoder *cxled;
229 	u64 dpa = ctx->dpa;
230 
231 	if (!is_endpoint_decoder(dev))
232 		return 0;
233 
234 	cxled = to_cxl_endpoint_decoder(dev);
235 	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
236 		return 0;
237 
238 	if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
239 		return 0;
240 
241 	dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
242 		dev_name(&cxled->cxld.region->dev));
243 
244 	ctx->cxlr = cxled->cxld.region;
245 
246 	return 1; /* non-zero stops device_for_each_child() iteration */
247 }
248 
249 static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
250 {
251 	struct cxl_dpa_to_region_context ctx;
252 	struct cxl_port *port;
253 
254 	ctx = (struct cxl_dpa_to_region_context) {
255 		.dpa = dpa,
256 	};
257 	port = dev_get_drvdata(&cxlmd->dev);
258 	if (port && is_cxl_endpoint(port) && port->commit_end != -1)
259 		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
260 
261 	return ctx.cxlr;
262 }
263 
264 static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
265 {
266 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
267 
268 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
269 		return 0;
270 
271 	if (!resource_size(&cxlds->dpa_res)) {
272 		dev_dbg(cxlds->dev, "device has no dpa resource\n");
273 		return -EINVAL;
274 	}
275 	if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
276 		dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
277 			dpa, &cxlds->dpa_res);
278 		return -EINVAL;
279 	}
280 	if (!IS_ALIGNED(dpa, 64)) {
281 		dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
282 		return -EINVAL;
283 	}
284 
285 	return 0;
286 }
287 
288 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
289 {
290 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
291 	struct cxl_mbox_inject_poison inject;
292 	struct cxl_poison_record record;
293 	struct cxl_mbox_cmd mbox_cmd;
294 	struct cxl_region *cxlr;
295 	int rc;
296 
297 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
298 		return 0;
299 
300 	rc = down_read_interruptible(&cxl_dpa_rwsem);
301 	if (rc)
302 		return rc;
303 
304 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
305 	if (rc)
306 		goto out;
307 
308 	inject.address = cpu_to_le64(dpa);
309 	mbox_cmd = (struct cxl_mbox_cmd) {
310 		.opcode = CXL_MBOX_OP_INJECT_POISON,
311 		.size_in = sizeof(inject),
312 		.payload_in = &inject,
313 	};
314 	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
315 	if (rc)
316 		goto out;
317 
318 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
319 	if (cxlr)
320 		dev_warn_once(cxlds->dev,
321 			      "poison inject dpa:%#llx region: %s\n", dpa,
322 			      dev_name(&cxlr->dev));
323 
324 	record = (struct cxl_poison_record) {
325 		.address = cpu_to_le64(dpa),
326 		.length = cpu_to_le32(1),
327 	};
328 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
329 out:
330 	up_read(&cxl_dpa_rwsem);
331 
332 	return rc;
333 }
334 EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
335 
336 int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
337 {
338 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
339 	struct cxl_mbox_clear_poison clear;
340 	struct cxl_poison_record record;
341 	struct cxl_mbox_cmd mbox_cmd;
342 	struct cxl_region *cxlr;
343 	int rc;
344 
345 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
346 		return 0;
347 
348 	rc = down_read_interruptible(&cxl_dpa_rwsem);
349 	if (rc)
350 		return rc;
351 
352 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
353 	if (rc)
354 		goto out;
355 
356 	/*
357 	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
358 	 * is defined to accept 64 bytes of write-data, along with the
359 	 * address to clear. This driver uses zeroes as write-data.
360 	 */
361 	clear = (struct cxl_mbox_clear_poison) {
362 		.address = cpu_to_le64(dpa)
363 	};
364 
365 	mbox_cmd = (struct cxl_mbox_cmd) {
366 		.opcode = CXL_MBOX_OP_CLEAR_POISON,
367 		.size_in = sizeof(clear),
368 		.payload_in = &clear,
369 	};
370 
371 	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
372 	if (rc)
373 		goto out;
374 
375 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
376 	if (cxlr)
377 		dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
378 			      dpa, dev_name(&cxlr->dev));
379 
380 	record = (struct cxl_poison_record) {
381 		.address = cpu_to_le64(dpa),
382 		.length = cpu_to_le32(1),
383 	};
384 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
385 out:
386 	up_read(&cxl_dpa_rwsem);
387 
388 	return rc;
389 }
390 EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);
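/*
 * As with the poison list trigger above, inject and clear are expected to be
 * exercised through debugfs by the cxl_mem driver. A sketch, assuming a
 * device named mem0 and a DPA that passes cxl_validate_poison_dpa():
 *
 *	echo 0x40000000 > /sys/kernel/debug/cxl/mem0/inject_poison
 *	echo 0x40000000 > /sys/kernel/debug/cxl/mem0/clear_poison
 *
 * Each operation logs a cxl_poison trace event with a single-fragment
 * (64-byte) record for the given address.
 */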
391 
392 static struct attribute *cxl_memdev_attributes[] = {
393 	&dev_attr_serial.attr,
394 	&dev_attr_firmware_version.attr,
395 	&dev_attr_payload_max.attr,
396 	&dev_attr_label_storage_size.attr,
397 	&dev_attr_numa_node.attr,
398 	NULL,
399 };
400 
401 static struct attribute *cxl_memdev_pmem_attributes[] = {
402 	&dev_attr_pmem_size.attr,
403 	NULL,
404 };
405 
406 static struct attribute *cxl_memdev_ram_attributes[] = {
407 	&dev_attr_ram_size.attr,
408 	NULL,
409 };
410 
411 static struct attribute *cxl_memdev_security_attributes[] = {
412 	&dev_attr_security_state.attr,
413 	&dev_attr_security_sanitize.attr,
414 	NULL,
415 };
416 
417 static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
418 				  int n)
419 {
420 	if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
421 		return 0;
422 	return a->mode;
423 }
424 
425 static struct attribute_group cxl_memdev_attribute_group = {
426 	.attrs = cxl_memdev_attributes,
427 	.is_visible = cxl_memdev_visible,
428 };
429 
430 static struct attribute_group cxl_memdev_ram_attribute_group = {
431 	.name = "ram",
432 	.attrs = cxl_memdev_ram_attributes,
433 };
434 
435 static struct attribute_group cxl_memdev_pmem_attribute_group = {
436 	.name = "pmem",
437 	.attrs = cxl_memdev_pmem_attributes,
438 };
439 
440 static struct attribute_group cxl_memdev_security_attribute_group = {
441 	.name = "security",
442 	.attrs = cxl_memdev_security_attributes,
443 };
444 
445 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
446 	&cxl_memdev_attribute_group,
447 	&cxl_memdev_ram_attribute_group,
448 	&cxl_memdev_pmem_attribute_group,
449 	&cxl_memdev_security_attribute_group,
450 	NULL,
451 };
452 
453 static const struct device_type cxl_memdev_type = {
454 	.name = "cxl_memdev",
455 	.release = cxl_memdev_release,
456 	.devnode = cxl_memdev_devnode,
457 	.groups = cxl_memdev_attribute_groups,
458 };
459 
460 bool is_cxl_memdev(const struct device *dev)
461 {
462 	return dev->type == &cxl_memdev_type;
463 }
464 EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);
465 
466 /**
467  * set_exclusive_cxl_commands() - atomically disable user cxl commands
468  * @cxlds: The device state to operate on
469  * @cmds: bitmap of commands to mark exclusive
470  *
471  * Grab the cxl_memdev_rwsem in write mode to flush in-flight
472  * invocations of the ioctl path and then disable future execution of
473  * commands with the command ids set in @cmds.
474  */
475 void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
476 {
477 	down_write(&cxl_memdev_rwsem);
478 	bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
479 		  CXL_MEM_COMMAND_ID_MAX);
480 	up_write(&cxl_memdev_rwsem);
481 }
482 EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
483 
484 /**
485  * clear_exclusive_cxl_commands() - atomically enable user cxl commands
486  * @cxlds: The device state to modify
487  * @cmds: bitmap of commands to mark available for userspace
488  */
489 void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
490 {
491 	down_write(&cxl_memdev_rwsem);
492 	bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
493 		      CXL_MEM_COMMAND_ID_MAX);
494 	up_write(&cxl_memdev_rwsem);
495 }
496 EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
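/*
 * A minimal sketch of a kernel-side consumer, modelled on the cxl_pmem
 * driver which owns the label storage area while an nvdimm is bound:
 *
 *	static DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 *
 *	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
 *	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
 *	...
 *	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
 *
 * While a command id is marked exclusive, userspace submissions of that
 * command via the ioctl path fail with -EBUSY.
 */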
497 
498 static void cxl_memdev_security_shutdown(struct device *dev)
499 {
500 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
501 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
502 
503 	if (cxlds->security.poll)
504 		cancel_delayed_work_sync(&cxlds->security.poll_dwork);
505 }
506 
507 static void cxl_memdev_shutdown(struct device *dev)
508 {
509 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
510 
511 	down_write(&cxl_memdev_rwsem);
512 	cxl_memdev_security_shutdown(dev);
513 	cxlmd->cxlds = NULL;
514 	up_write(&cxl_memdev_rwsem);
515 }
516 
517 static void cxl_memdev_unregister(void *_cxlmd)
518 {
519 	struct cxl_memdev *cxlmd = _cxlmd;
520 	struct device *dev = &cxlmd->dev;
521 
522 	cxl_memdev_shutdown(dev);
523 	cdev_device_del(&cxlmd->cdev, dev);
524 	put_device(dev);
525 }
526 
527 static void detach_memdev(struct work_struct *work)
528 {
529 	struct cxl_memdev *cxlmd;
530 
531 	cxlmd = container_of(work, typeof(*cxlmd), detach_work);
532 	device_release_driver(&cxlmd->dev);
533 	put_device(&cxlmd->dev);
534 }
535 
536 static struct lock_class_key cxl_memdev_key;
537 
538 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
539 					   const struct file_operations *fops)
540 {
541 	struct cxl_memdev *cxlmd;
542 	struct device *dev;
543 	struct cdev *cdev;
544 	int rc;
545 
546 	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
547 	if (!cxlmd)
548 		return ERR_PTR(-ENOMEM);
549 
550 	rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
551 	if (rc < 0)
552 		goto err;
553 	cxlmd->id = rc;
554 	cxlmd->depth = -1;
555 
556 	dev = &cxlmd->dev;
557 	device_initialize(dev);
558 	lockdep_set_class(&dev->mutex, &cxl_memdev_key);
559 	dev->parent = cxlds->dev;
560 	dev->bus = &cxl_bus_type;
561 	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
562 	dev->type = &cxl_memdev_type;
563 	device_set_pm_not_required(dev);
564 	INIT_WORK(&cxlmd->detach_work, detach_memdev);
565 
566 	cdev = &cxlmd->cdev;
567 	cdev_init(cdev, fops);
568 	return cxlmd;
569 
570 err:
571 	kfree(cxlmd);
572 	return ERR_PTR(rc);
573 }
574 
575 static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
576 			       unsigned long arg)
577 {
578 	switch (cmd) {
579 	case CXL_MEM_QUERY_COMMANDS:
580 		return cxl_query_cmd(cxlmd, (void __user *)arg);
581 	case CXL_MEM_SEND_COMMAND:
582 		return cxl_send_cmd(cxlmd, (void __user *)arg);
583 	default:
584 		return -ENOTTY;
585 	}
586 }
587 
588 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
589 			     unsigned long arg)
590 {
591 	struct cxl_memdev *cxlmd = file->private_data;
592 	int rc = -ENXIO;
593 
594 	down_read(&cxl_memdev_rwsem);
595 	if (cxlmd->cxlds)
596 		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
597 	up_read(&cxl_memdev_rwsem);
598 
599 	return rc;
600 }
601 
602 static int cxl_memdev_open(struct inode *inode, struct file *file)
603 {
604 	struct cxl_memdev *cxlmd =
605 		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
606 
607 	get_device(&cxlmd->dev);
608 	file->private_data = cxlmd;
609 
610 	return 0;
611 }
612 
613 static int cxl_memdev_release_file(struct inode *inode, struct file *file)
614 {
615 	struct cxl_memdev *cxlmd =
616 		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
617 
618 	put_device(&cxlmd->dev);
619 
620 	return 0;
621 }
622 
623 static const struct file_operations cxl_memdev_fops = {
624 	.owner = THIS_MODULE,
625 	.unlocked_ioctl = cxl_memdev_ioctl,
626 	.open = cxl_memdev_open,
627 	.release = cxl_memdev_release_file,
628 	.compat_ioctl = compat_ptr_ioctl,
629 	.llseek = noop_llseek,
630 };
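/*
 * Userspace reaches these fops through the /dev/cxl/memX node. A minimal
 * sketch (error handling elided) of the query ioctl, using the uapi
 * definitions from <linux/cxl_mem.h>:
 *
 *	struct cxl_mem_query_commands q = { .n_commands = 0 };
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, &q);
 *
 * With n_commands == 0 the driver only reports the number of supported
 * commands; a second call with a suitably sized buffer retrieves the
 * per-command info used to build CXL_MEM_SEND_COMMAND submissions.
 */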
631 
632 static void put_sanitize(void *data)
633 {
634 	struct cxl_dev_state *cxlds = data;
635 
636 	sysfs_put(cxlds->security.sanitize_node);
637 }
638 
639 static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
640 {
641 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
642 	struct device *dev = &cxlmd->dev;
643 	struct kernfs_node *sec;
644 
645 	sec = sysfs_get_dirent(dev->kobj.sd, "security");
646 	if (!sec) {
647 		dev_err(dev, "sysfs_get_dirent 'security' failed\n");
648 		return -ENODEV;
649 	}
650 	cxlds->security.sanitize_node = sysfs_get_dirent(sec, "state");
651 	sysfs_put(sec);
652 	if (!cxlds->security.sanitize_node) {
653 		dev_err(dev, "sysfs_get_dirent 'state' failed\n");
654 		return -ENODEV;
655 	}
656 
657 	return devm_add_action_or_reset(cxlds->dev, put_sanitize, cxlds);
658 }
659 
660 struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
661 {
662 	struct cxl_memdev *cxlmd;
663 	struct device *dev;
664 	struct cdev *cdev;
665 	int rc;
666 
667 	cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
668 	if (IS_ERR(cxlmd))
669 		return cxlmd;
670 
671 	dev = &cxlmd->dev;
672 	rc = dev_set_name(dev, "mem%d", cxlmd->id);
673 	if (rc)
674 		goto err;
675 
676 	/*
677 	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
678 	 * needed as this is ordered with cdev_add() publishing the device.
679 	 */
680 	cxlmd->cxlds = cxlds;
681 	cxlds->cxlmd = cxlmd;
682 
683 	cdev = &cxlmd->cdev;
684 	rc = cdev_device_add(cdev, dev);
685 	if (rc)
686 		goto err;
687 
688 	rc = cxl_memdev_security_init(cxlmd);
689 	if (rc)
690 		goto err;
691 
692 	rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
693 	if (rc)
694 		return ERR_PTR(rc);
695 	return cxlmd;
696 
697 err:
698 	/*
699 	 * The cdev was briefly live; shut down any ioctl operations that
700 	 * saw that state.
701 	 */
702 	cxl_memdev_shutdown(dev);
703 	put_device(dev);
704 	return ERR_PTR(rc);
705 }
706 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
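/*
 * Sketch of the expected caller pattern (modelled on cxl_pci_probe()), once
 * the cxl_dev_state has been fully initialized:
 *
 *	cxlmd = devm_cxl_add_memdev(cxlds);
 *	if (IS_ERR(cxlmd))
 *		return PTR_ERR(cxlmd);
 *
 * Teardown is devres-managed: cxl_memdev_unregister() runs automatically
 * when the parent (cxlds->dev) driver unbinds.
 */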
707 
708 __init int cxl_memdev_init(void)
709 {
710 	dev_t devt;
711 	int rc;
712 
713 	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
714 	if (rc)
715 		return rc;
716 
717 	cxl_mem_major = MAJOR(devt);
718 
719 	return 0;
720 }
721 
722 void cxl_memdev_exit(void)
723 {
724 	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
725 }
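/*
 * cxl_memdev_init()/cxl_memdev_exit() only reserve and release the chrdev
 * region; they are invoked from the cxl_core module init/exit path, and
 * individual memdevs are created later via devm_cxl_add_memdev().
 */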
726