xref: /openbmc/linux/drivers/cxl/core/memdev.c (revision f14c1a14)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 
4 #include <linux/io-64-nonatomic-lo-hi.h>
5 #include <linux/firmware.h>
6 #include <linux/device.h>
7 #include <linux/slab.h>
8 #include <linux/idr.h>
9 #include <linux/pci.h>
10 #include <cxlmem.h>
11 #include "trace.h"
12 #include "core.h"
13 
14 static DECLARE_RWSEM(cxl_memdev_rwsem);
15 
16 /*
17  * An entire PCI topology full of devices should be enough for any
18  * config: 256 buses x 32 devices x 8 functions = 65536 device functions
19  */
20 #define CXL_MEM_MAX_DEVS 65536
21 
22 static int cxl_mem_major;
23 static DEFINE_IDA(cxl_memdev_ida);
24 
25 static void cxl_memdev_release(struct device *dev)
26 {
27 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
28 
29 	ida_free(&cxl_memdev_ida, cxlmd->id);
30 	kfree(cxlmd);
31 }
32 
33 static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
34 				kgid_t *gid)
35 {
36 	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
37 }
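/*
 * Note: with the "mem%d" names assigned in devm_cxl_add_memdev() below,
 * this devnode callback places the character device nodes under a "cxl/"
 * subdirectory, e.g. /dev/cxl/mem0.
 */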
38 
39 static ssize_t firmware_version_show(struct device *dev,
40 				     struct device_attribute *attr, char *buf)
41 {
42 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
43 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
44 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
45 
46 	if (!mds)
47 		return sysfs_emit(buf, "\n");
48 	return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
49 }
50 static DEVICE_ATTR_RO(firmware_version);
51 
52 static ssize_t payload_max_show(struct device *dev,
53 				struct device_attribute *attr, char *buf)
54 {
55 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
56 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
57 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
58 
59 	if (!mds)
60 		return sysfs_emit(buf, "\n");
61 	return sysfs_emit(buf, "%zu\n", mds->payload_size);
62 }
63 static DEVICE_ATTR_RO(payload_max);
64 
65 static ssize_t label_storage_size_show(struct device *dev,
66 				       struct device_attribute *attr, char *buf)
67 {
68 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
69 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
70 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
71 
72 	if (!mds)
73 		return sysfs_emit(buf, "\n");
74 	return sysfs_emit(buf, "%zu\n", mds->lsa_size);
75 }
76 static DEVICE_ATTR_RO(label_storage_size);
77 
78 static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
79 			     char *buf)
80 {
81 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
82 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
83 	unsigned long long len = resource_size(&cxlds->ram_res);
84 
85 	return sysfs_emit(buf, "%#llx\n", len);
86 }
87 
88 static struct device_attribute dev_attr_ram_size =
89 	__ATTR(size, 0444, ram_size_show, NULL);
90 
91 static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
92 			      char *buf)
93 {
94 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
95 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
96 	unsigned long long len = resource_size(&cxlds->pmem_res);
97 
98 	return sysfs_emit(buf, "%#llx\n", len);
99 }
100 
101 static struct device_attribute dev_attr_pmem_size =
102 	__ATTR(size, 0444, pmem_size_show, NULL);
103 
104 static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
105 			   char *buf)
106 {
107 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
108 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
109 
110 	return sysfs_emit(buf, "%#llx\n", cxlds->serial);
111 }
112 static DEVICE_ATTR_RO(serial);
113 
114 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
115 			      char *buf)
116 {
117 	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
118 }
119 static DEVICE_ATTR_RO(numa_node);
120 
121 static ssize_t security_state_show(struct device *dev,
122 				   struct device_attribute *attr,
123 				   char *buf)
124 {
125 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
126 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
127 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
128 	u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
129 	u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
130 	u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
131 	unsigned long state = mds->security.state;
132 
133 	if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
134 		return sysfs_emit(buf, "sanitize\n");
135 
136 	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
137 		return sysfs_emit(buf, "disabled\n");
138 	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
139 	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
140 	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
141 		return sysfs_emit(buf, "frozen\n");
142 	if (state & CXL_PMEM_SEC_STATE_LOCKED)
143 		return sysfs_emit(buf, "locked\n");
144 	else
145 		return sysfs_emit(buf, "unlocked\n");
146 }
147 static struct device_attribute dev_attr_security_state =
148 	__ATTR(state, 0444, security_state_show, NULL);
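/*
 * Illustrative read of this attribute, assuming a memdev mem0; it is
 * registered under the "security" group defined later in this file:
 *
 *	$ cat /sys/bus/cxl/devices/mem0/security/state
 *	disabled
 */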
149 
150 static ssize_t security_sanitize_store(struct device *dev,
151 				       struct device_attribute *attr,
152 				       const char *buf, size_t len)
153 {
154 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
155 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
156 	struct cxl_port *port = cxlmd->endpoint;
157 	bool sanitize;
158 	ssize_t rc;
159 
160 	if (kstrtobool(buf, &sanitize) || !sanitize)
161 		return -EINVAL;
162 
163 	if (!port || !is_cxl_endpoint(port))
164 		return -EINVAL;
165 
166 	/* ensure no regions are mapped to this memdev */
167 	if (port->commit_end != -1)
168 		return -EBUSY;
169 
170 	rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
171 
172 	return rc ? rc : len;
173 }
174 static struct device_attribute dev_attr_security_sanitize =
175 	__ATTR(sanitize, 0200, NULL, security_sanitize_store);
176 
177 static ssize_t security_erase_store(struct device *dev,
178 				    struct device_attribute *attr,
179 				    const char *buf, size_t len)
180 {
181 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
182 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
183 	struct cxl_port *port = cxlmd->endpoint;
184 	ssize_t rc;
185 	bool erase;
186 
187 	if (kstrtobool(buf, &erase) || !erase)
188 		return -EINVAL;
189 
190 	if (!port || !is_cxl_endpoint(port))
191 		return -EINVAL;
192 
193 	/* ensure no regions are mapped to this memdev */
194 	if (port->commit_end != -1)
195 		return -EBUSY;
196 
197 	rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
198 
199 	return rc ? rc : len;
200 }
201 static struct device_attribute dev_attr_security_erase =
202 	__ATTR(erase, 0200, NULL, security_erase_store);
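/*
 * Illustrative usage of the two write-only security attributes above,
 * assuming a memdev mem0 with no committed regions (otherwise the
 * writes fail with -EBUSY):
 *
 *	$ echo 1 > /sys/bus/cxl/devices/mem0/security/sanitize
 *	$ echo 1 > /sys/bus/cxl/devices/mem0/security/erase
 */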
203 
204 static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
205 {
206 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
207 	u64 offset, length;
208 	int rc = 0;
209 
210 	/* Per CXL 3.0 Spec 8.2.9.8.4.1, issue separate pmem and ram poison requests */
211 	if (resource_size(&cxlds->pmem_res)) {
212 		offset = cxlds->pmem_res.start;
213 		length = resource_size(&cxlds->pmem_res);
214 		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
215 		if (rc)
216 			return rc;
217 	}
218 	if (resource_size(&cxlds->ram_res)) {
219 		offset = cxlds->ram_res.start;
220 		length = resource_size(&cxlds->ram_res);
221 		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
222 		/*
223 		 * Invalid Physical Address is not an error for
224 		 * volatile addresses. Device support is optional.
225 		 */
226 		if (rc == -EFAULT)
227 			rc = 0;
228 	}
229 	return rc;
230 }
231 
232 int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
233 {
234 	struct cxl_port *port;
235 	int rc;
236 
237 	port = cxlmd->endpoint;
238 	if (!port || !is_cxl_endpoint(port))
239 		return -EINVAL;
240 
241 	rc = down_read_interruptible(&cxl_dpa_rwsem);
242 	if (rc)
243 		return rc;
244 
245 	if (port->commit_end == -1) {
246 		/* No regions mapped to this memdev */
247 		rc = cxl_get_poison_by_memdev(cxlmd);
248 	} else {
249 		/* Regions mapped, collect poison by endpoint */
250 		rc = cxl_get_poison_by_endpoint(port);
251 	}
252 	up_read(&cxl_dpa_rwsem);
253 
254 	return rc;
255 }
256 EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
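/*
 * Sketch of the userspace trigger behind this export, assuming the
 * memdev driver publishes a trigger_poison_list sysfs attribute (per
 * the CXL sysfs ABI documentation) for a memdev mem0:
 *
 *	$ echo 1 > /sys/bus/cxl/devices/mem0/trigger_poison_list
 *
 * Any poison records found are reported via the cxl_poison tracepoint.
 */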
257 
258 struct cxl_dpa_to_region_context {
259 	struct cxl_region *cxlr;
260 	u64 dpa;
261 };
262 
263 static int __cxl_dpa_to_region(struct device *dev, void *arg)
264 {
265 	struct cxl_dpa_to_region_context *ctx = arg;
266 	struct cxl_endpoint_decoder *cxled;
267 	u64 dpa = ctx->dpa;
268 
269 	if (!is_endpoint_decoder(dev))
270 		return 0;
271 
272 	cxled = to_cxl_endpoint_decoder(dev);
273 	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
274 		return 0;
275 
276 	if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
277 		return 0;
278 
279 	dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
280 		dev_name(&cxled->cxld.region->dev));
281 
282 	ctx->cxlr = cxled->cxld.region;
283 
284 	return 1;
285 }
286 
287 static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
288 {
289 	struct cxl_dpa_to_region_context ctx;
290 	struct cxl_port *port;
291 
292 	ctx = (struct cxl_dpa_to_region_context) {
293 		.dpa = dpa,
294 	};
295 	port = cxlmd->endpoint;
296 	if (port && is_cxl_endpoint(port) && port->commit_end != -1)
297 		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
298 
299 	return ctx.cxlr;
300 }
301 
302 static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
303 {
304 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
305 
306 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
307 		return 0;
308 
309 	if (!resource_size(&cxlds->dpa_res)) {
310 		dev_dbg(cxlds->dev, "device has no dpa resource\n");
311 		return -EINVAL;
312 	}
313 	if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
314 		dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
315 			dpa, &cxlds->dpa_res);
316 		return -EINVAL;
317 	}
318 	if (!IS_ALIGNED(dpa, 64)) {
319 		dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
320 		return -EINVAL;
321 	}
322 
323 	return 0;
324 }
325 
326 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
327 {
328 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
329 	struct cxl_mbox_inject_poison inject;
330 	struct cxl_poison_record record;
331 	struct cxl_mbox_cmd mbox_cmd;
332 	struct cxl_region *cxlr;
333 	int rc;
334 
335 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
336 		return 0;
337 
338 	rc = down_read_interruptible(&cxl_dpa_rwsem);
339 	if (rc)
340 		return rc;
341 
342 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
343 	if (rc)
344 		goto out;
345 
346 	inject.address = cpu_to_le64(dpa);
347 	mbox_cmd = (struct cxl_mbox_cmd) {
348 		.opcode = CXL_MBOX_OP_INJECT_POISON,
349 		.size_in = sizeof(inject),
350 		.payload_in = &inject,
351 	};
352 	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
353 	if (rc)
354 		goto out;
355 
356 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
357 	if (cxlr)
358 		dev_warn_once(mds->cxlds.dev,
359 			      "poison inject dpa:%#llx region: %s\n", dpa,
360 			      dev_name(&cxlr->dev));
361 
362 	record = (struct cxl_poison_record) {
363 		.address = cpu_to_le64(dpa),
364 		.length = cpu_to_le32(1),
365 	};
366 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
367 out:
368 	up_read(&cxl_dpa_rwsem);
369 
370 	return rc;
371 }
372 EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
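/*
 * cxl_inject_poison() backs a debugfs knob created by the memdev
 * driver; an illustrative invocation, assuming a memdev mem0 and a
 * valid 64-byte-aligned DPA:
 *
 *	# echo 0x40000000 > /sys/kernel/debug/cxl/mem0/inject_poison
 */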
373 
374 int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
375 {
376 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
377 	struct cxl_mbox_clear_poison clear;
378 	struct cxl_poison_record record;
379 	struct cxl_mbox_cmd mbox_cmd;
380 	struct cxl_region *cxlr;
381 	int rc;
382 
383 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
384 		return 0;
385 
386 	rc = down_read_interruptible(&cxl_dpa_rwsem);
387 	if (rc)
388 		return rc;
389 
390 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
391 	if (rc)
392 		goto out;
393 
394 	/*
395 	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
396 	 * is defined to accept 64 bytes of write-data, along with the
397 	 * address to clear. This driver uses zeroes as write-data.
398 	 */
399 	clear = (struct cxl_mbox_clear_poison) {
400 		.address = cpu_to_le64(dpa)
401 	};
402 
403 	mbox_cmd = (struct cxl_mbox_cmd) {
404 		.opcode = CXL_MBOX_OP_CLEAR_POISON,
405 		.size_in = sizeof(clear),
406 		.payload_in = &clear,
407 	};
408 
409 	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
410 	if (rc)
411 		goto out;
412 
413 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
414 	if (cxlr)
415 		dev_warn_once(mds->cxlds.dev,
416 			      "poison clear dpa:%#llx region: %s\n", dpa,
417 			      dev_name(&cxlr->dev));
418 
419 	record = (struct cxl_poison_record) {
420 		.address = cpu_to_le64(dpa),
421 		.length = cpu_to_le32(1),
422 	};
423 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
424 out:
425 	up_read(&cxl_dpa_rwsem);
426 
427 	return rc;
428 }
429 EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);
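/*
 * Likewise, an illustrative invocation of the corresponding debugfs
 * knob for clearing poison at the same DPA:
 *
 *	# echo 0x40000000 > /sys/kernel/debug/cxl/mem0/clear_poison
 */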
430 
431 static struct attribute *cxl_memdev_attributes[] = {
432 	&dev_attr_serial.attr,
433 	&dev_attr_firmware_version.attr,
434 	&dev_attr_payload_max.attr,
435 	&dev_attr_label_storage_size.attr,
436 	&dev_attr_numa_node.attr,
437 	NULL,
438 };
439 
440 static struct attribute *cxl_memdev_pmem_attributes[] = {
441 	&dev_attr_pmem_size.attr,
442 	NULL,
443 };
444 
445 static struct attribute *cxl_memdev_ram_attributes[] = {
446 	&dev_attr_ram_size.attr,
447 	NULL,
448 };
449 
450 static struct attribute *cxl_memdev_security_attributes[] = {
451 	&dev_attr_security_state.attr,
452 	&dev_attr_security_sanitize.attr,
453 	&dev_attr_security_erase.attr,
454 	NULL,
455 };
456 
457 static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
458 				  int n)
459 {
460 	if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
461 		return 0;
462 	return a->mode;
463 }
464 
465 static struct attribute_group cxl_memdev_attribute_group = {
466 	.attrs = cxl_memdev_attributes,
467 	.is_visible = cxl_memdev_visible,
468 };
469 
470 static struct attribute_group cxl_memdev_ram_attribute_group = {
471 	.name = "ram",
472 	.attrs = cxl_memdev_ram_attributes,
473 };
474 
475 static struct attribute_group cxl_memdev_pmem_attribute_group = {
476 	.name = "pmem",
477 	.attrs = cxl_memdev_pmem_attributes,
478 };
479 
480 static struct attribute_group cxl_memdev_security_attribute_group = {
481 	.name = "security",
482 	.attrs = cxl_memdev_security_attributes,
483 };
484 
485 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
486 	&cxl_memdev_attribute_group,
487 	&cxl_memdev_ram_attribute_group,
488 	&cxl_memdev_pmem_attribute_group,
489 	&cxl_memdev_security_attribute_group,
490 	NULL,
491 };
492 
493 static const struct device_type cxl_memdev_type = {
494 	.name = "cxl_memdev",
495 	.release = cxl_memdev_release,
496 	.devnode = cxl_memdev_devnode,
497 	.groups = cxl_memdev_attribute_groups,
498 };
499 
500 bool is_cxl_memdev(const struct device *dev)
501 {
502 	return dev->type == &cxl_memdev_type;
503 }
504 EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);
505 
506 /**
507  * set_exclusive_cxl_commands() - atomically disable user cxl commands
508  * @mds: The device state to operate on
509  * @cmds: bitmap of commands to mark exclusive
510  *
511  * Grab the cxl_memdev_rwsem in write mode to flush in-flight
512  * invocations of the ioctl path and then disable future execution of
513  * commands with the command ids set in @cmds.
514  */
515 void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
516 				unsigned long *cmds)
517 {
518 	down_write(&cxl_memdev_rwsem);
519 	bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
520 		  CXL_MEM_COMMAND_ID_MAX);
521 	up_write(&cxl_memdev_rwsem);
522 }
523 EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
524 
525 /**
526  * clear_exclusive_cxl_commands() - atomically enable user cxl commands
527  * @mds: The device state to modify
528  * @cmds: bitmap of commands to mark available for userspace
529  */
530 void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
531 				  unsigned long *cmds)
532 {
533 	down_write(&cxl_memdev_rwsem);
534 	bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
535 		      CXL_MEM_COMMAND_ID_MAX);
536 	up_write(&cxl_memdev_rwsem);
537 }
538 EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
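/*
 * Minimal usage sketch for the exclusive-command pair, modeled on how
 * the cxl_pmem driver fences label-area commands from the ioctl path
 * while the kernel owns the label storage area:
 *
 *	DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 *
 *	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
 *	set_exclusive_cxl_commands(mds, exclusive_cmds);
 *	...kernel-driven label updates...
 *	clear_exclusive_cxl_commands(mds, exclusive_cmds);
 */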
539 
540 static void cxl_memdev_security_shutdown(struct device *dev)
541 {
542 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
543 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
544 
545 	if (mds->security.poll)
546 		cancel_delayed_work_sync(&mds->security.poll_dwork);
547 }
548 
549 static void cxl_memdev_shutdown(struct device *dev)
550 {
551 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
552 
553 	down_write(&cxl_memdev_rwsem);
554 	cxl_memdev_security_shutdown(dev);
555 	cxlmd->cxlds = NULL;
556 	up_write(&cxl_memdev_rwsem);
557 }
558 
559 static void cxl_memdev_unregister(void *_cxlmd)
560 {
561 	struct cxl_memdev *cxlmd = _cxlmd;
562 	struct device *dev = &cxlmd->dev;
563 
564 	cxl_memdev_shutdown(dev);
565 	cdev_device_del(&cxlmd->cdev, dev);
566 	put_device(dev);
567 }
568 
569 static void detach_memdev(struct work_struct *work)
570 {
571 	struct cxl_memdev *cxlmd;
572 
573 	cxlmd = container_of(work, typeof(*cxlmd), detach_work);
574 	device_release_driver(&cxlmd->dev);
575 	put_device(&cxlmd->dev);
576 }
577 
578 static struct lock_class_key cxl_memdev_key;
579 
580 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
581 					   const struct file_operations *fops)
582 {
583 	struct cxl_memdev *cxlmd;
584 	struct device *dev;
585 	struct cdev *cdev;
586 	int rc;
587 
588 	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
589 	if (!cxlmd)
590 		return ERR_PTR(-ENOMEM);
591 
592 	rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
593 	if (rc < 0)
594 		goto err;
595 	cxlmd->id = rc;
596 	cxlmd->depth = -1;
597 
598 	dev = &cxlmd->dev;
599 	device_initialize(dev);
600 	lockdep_set_class(&dev->mutex, &cxl_memdev_key);
601 	dev->parent = cxlds->dev;
602 	dev->bus = &cxl_bus_type;
603 	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
604 	dev->type = &cxl_memdev_type;
605 	device_set_pm_not_required(dev);
606 	INIT_WORK(&cxlmd->detach_work, detach_memdev);
607 
608 	cdev = &cxlmd->cdev;
609 	cdev_init(cdev, fops);
610 	return cxlmd;
611 
612 err:
613 	kfree(cxlmd);
614 	return ERR_PTR(rc);
615 }
616 
617 static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
618 			       unsigned long arg)
619 {
620 	switch (cmd) {
621 	case CXL_MEM_QUERY_COMMANDS:
622 		return cxl_query_cmd(cxlmd, (void __user *)arg);
623 	case CXL_MEM_SEND_COMMAND:
624 		return cxl_send_cmd(cxlmd, (void __user *)arg);
625 	default:
626 		return -ENOTTY;
627 	}
628 }
629 
630 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
631 			     unsigned long arg)
632 {
633 	struct cxl_memdev *cxlmd = file->private_data;
634 	struct cxl_dev_state *cxlds;
635 	int rc = -ENXIO;
636 
637 	down_read(&cxl_memdev_rwsem);
638 	cxlds = cxlmd->cxlds;
639 	if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
640 		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
641 	up_read(&cxl_memdev_rwsem);
642 
643 	return rc;
644 }
645 
646 static int cxl_memdev_open(struct inode *inode, struct file *file)
647 {
648 	struct cxl_memdev *cxlmd =
649 		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
650 
651 	get_device(&cxlmd->dev);
652 	file->private_data = cxlmd;
653 
654 	return 0;
655 }
656 
657 static int cxl_memdev_release_file(struct inode *inode, struct file *file)
658 {
659 	struct cxl_memdev *cxlmd =
660 		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
661 
662 	put_device(&cxlmd->dev);
663 
664 	return 0;
665 }
666 
667 /**
668  * cxl_mem_get_fw_info - Get Firmware info
669  * @mds: The device data for the operation
670  *
671  * Retrieve firmware info for the device specified.
672  *
673  * Return: 0 if no error; otherwise the result of the mailbox command.
674  *
675  * See CXL-3.0 8.2.9.3.1 Get FW Info
676  */
677 static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
678 {
679 	struct cxl_mbox_get_fw_info info;
680 	struct cxl_mbox_cmd mbox_cmd;
681 	int rc;
682 
683 	mbox_cmd = (struct cxl_mbox_cmd) {
684 		.opcode = CXL_MBOX_OP_GET_FW_INFO,
685 		.size_out = sizeof(info),
686 		.payload_out = &info,
687 	};
688 
689 	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
690 	if (rc < 0)
691 		return rc;
692 
693 	mds->fw.num_slots = info.num_slots;
694 	mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
695 				       info.slot_info);
696 
697 	return 0;
698 }
699 
700 /**
701  * cxl_mem_activate_fw - Activate Firmware
702  * @mds: The device data for the operation
703  * @slot: slot number to activate
704  *
705  * Activate firmware in a given slot for the device specified.
706  *
707  * Return: 0 if no error; otherwise the result of the mailbox command.
708  *
709  * See CXL-3.0 8.2.9.3.3 Activate FW
710  */
711 static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
712 {
713 	struct cxl_mbox_activate_fw activate;
714 	struct cxl_mbox_cmd mbox_cmd;
715 
716 	if (slot == 0 || slot > mds->fw.num_slots)
717 		return -EINVAL;
718 
719 	mbox_cmd = (struct cxl_mbox_cmd) {
720 		.opcode = CXL_MBOX_OP_ACTIVATE_FW,
721 		.size_in = sizeof(activate),
722 		.payload_in = &activate,
723 	};
724 
725 	/* Only offline activation supported for now */
726 	activate.action = CXL_FW_ACTIVATE_OFFLINE;
727 	activate.slot = slot;
728 
729 	return cxl_internal_send_cmd(mds, &mbox_cmd);
730 }
731 
732 /**
733  * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
734  * @mds: The device data for the operation
735  *
736  * Abort an in-progress firmware transfer for the device specified.
737  *
738  * Return: 0 if no error; otherwise the result of the mailbox command.
739  *
740  * See CXL-3.0 8.2.9.3.2 Transfer FW
741  */
742 static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
743 {
744 	struct cxl_mbox_transfer_fw *transfer;
745 	struct cxl_mbox_cmd mbox_cmd;
746 	int rc;
747 
748 	transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
749 	if (!transfer)
750 		return -ENOMEM;
751 
752 	/* Set a 1s poll interval and a total wait time of 30s */
753 	mbox_cmd = (struct cxl_mbox_cmd) {
754 		.opcode = CXL_MBOX_OP_TRANSFER_FW,
755 		.size_in = sizeof(*transfer),
756 		.payload_in = transfer,
757 		.poll_interval_ms = 1000,
758 		.poll_count = 30,
759 	};
760 
761 	transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
762 
763 	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
764 	kfree(transfer);
765 	return rc;
766 }
767 
768 static void cxl_fw_cleanup(struct fw_upload *fwl)
769 {
770 	struct cxl_memdev_state *mds = fwl->dd_handle;
771 
772 	mds->fw.next_slot = 0;
773 }
774 
775 static int cxl_fw_do_cancel(struct fw_upload *fwl)
776 {
777 	struct cxl_memdev_state *mds = fwl->dd_handle;
778 	struct cxl_dev_state *cxlds = &mds->cxlds;
779 	struct cxl_memdev *cxlmd = cxlds->cxlmd;
780 	int rc;
781 
782 	rc = cxl_mem_abort_fw_xfer(mds);
783 	if (rc < 0)
784 		dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);
785 
786 	return FW_UPLOAD_ERR_CANCELED;
787 }
788 
789 static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
790 					 u32 size)
791 {
792 	struct cxl_memdev_state *mds = fwl->dd_handle;
793 	struct cxl_mbox_transfer_fw *transfer;
794 
795 	if (!size)
796 		return FW_UPLOAD_ERR_INVALID_SIZE;
797 
798 	mds->fw.oneshot = struct_size(transfer, data, size) <
799 			    mds->payload_size;
800 
801 	if (cxl_mem_get_fw_info(mds))
802 		return FW_UPLOAD_ERR_HW_ERROR;
803 
804 	/*
805 	 * So far no state has been changed, hence no other cleanup is
806 	 * necessary. Simply return the cancelled status.
807 	 */
808 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
809 		return FW_UPLOAD_ERR_CANCELED;
810 
811 	return FW_UPLOAD_ERR_NONE;
812 }
813 
814 static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
815 				       u32 offset, u32 size, u32 *written)
816 {
817 	struct cxl_memdev_state *mds = fwl->dd_handle;
818 	struct cxl_dev_state *cxlds = &mds->cxlds;
819 	struct cxl_memdev *cxlmd = cxlds->cxlmd;
820 	struct cxl_mbox_transfer_fw *transfer;
821 	struct cxl_mbox_cmd mbox_cmd;
822 	u32 cur_size, remaining;
823 	size_t size_in;
824 	int rc;
825 
826 	*written = 0;
827 
828 	/* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
829 	if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
830 		dev_err(&cxlmd->dev,
831 			"misaligned offset for FW transfer slice (%u)\n",
832 			offset);
833 		return FW_UPLOAD_ERR_RW_ERROR;
834 	}
835 
836 	/*
837 	 * Pick the transfer size based on mds->payload_size. @size must be
838 	 * 128-byte aligned, ->payload_size is a power of 2 starting at 256
839 	 * bytes, and sizeof(*transfer) is 128. These constraints imply that
840 	 * @cur_size will always be 128-byte aligned.
841 	 */
842 	cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));
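	/*
	 * Worked example (illustrative): with a 1 MiB mailbox payload,
	 * each slice carries at most 1048576 - 128 = 1048448 bytes of
	 * data, which is 128-byte aligned (1048448 == 8191 * 128).
	 */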
843 
844 	remaining = size - cur_size;
845 	size_in = struct_size(transfer, data, cur_size);
846 
847 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
848 		return cxl_fw_do_cancel(fwl);
849 
850 	/*
851 	 * Slot numbers are 1-indexed: 'cur_slot % num_slots' yields the
852 	 * 0-indexed slot after cur_slot (handling rollover), and adding 1
853 	 * converts it back to a 1-indexed slot number.
854 	 */
855 	mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
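	/*
	 * e.g. (illustrative): num_slots = 4 and cur_slot = 4 (the last
	 * slot) gives next_slot = (4 % 4) + 1 = 1, wrapping around.
	 */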
856 
857 	/* Do the transfer via mailbox cmd */
858 	transfer = kzalloc(size_in, GFP_KERNEL);
859 	if (!transfer)
860 		return FW_UPLOAD_ERR_RW_ERROR;
861 
862 	transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
863 	memcpy(transfer->data, data + offset, cur_size);
864 	if (mds->fw.oneshot) {
865 		transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
866 		transfer->slot = mds->fw.next_slot;
867 	} else {
868 		if (offset == 0) {
869 			transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
870 		} else if (remaining == 0) {
871 			transfer->action = CXL_FW_TRANSFER_ACTION_END;
872 			transfer->slot = mds->fw.next_slot;
873 		} else {
874 			transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
875 		}
876 	}
877 
878 	mbox_cmd = (struct cxl_mbox_cmd) {
879 		.opcode = CXL_MBOX_OP_TRANSFER_FW,
880 		.size_in = size_in,
881 		.payload_in = transfer,
882 		.poll_interval_ms = 1000,
883 		.poll_count = 30,
884 	};
885 
886 	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
887 	if (rc < 0) {
888 		rc = FW_UPLOAD_ERR_RW_ERROR;
889 		goto out_free;
890 	}
891 
892 	*written = cur_size;
893 
894 	/* Activate FW if oneshot or if the last slice was written */
895 	if (mds->fw.oneshot || remaining == 0) {
896 		dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
897 			mds->fw.next_slot);
898 		rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
899 		if (rc < 0) {
900 			dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
901 				rc);
902 			rc = FW_UPLOAD_ERR_HW_ERROR;
903 			goto out_free;
904 		}
905 	}
906 
907 	rc = FW_UPLOAD_ERR_NONE;
908 
909 out_free:
910 	kfree(transfer);
911 	return rc;
912 }
913 
914 static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
915 {
916 	struct cxl_memdev_state *mds = fwl->dd_handle;
917 
918 	/*
919 	 * cxl_internal_send_cmd() handles background operations synchronously.
920 	 * No need to wait for completions here - any errors would've been
921 	 * reported and handled during the ->write() call(s).
922 	 * Just check if a cancel request was received, and return success.
923 	 */
924 	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
925 		return cxl_fw_do_cancel(fwl);
926 
927 	return FW_UPLOAD_ERR_NONE;
928 }
929 
930 static void cxl_fw_cancel(struct fw_upload *fwl)
931 {
932 	struct cxl_memdev_state *mds = fwl->dd_handle;
933 
934 	set_bit(CXL_FW_CANCEL, mds->fw.state);
935 }
936 
937 static const struct fw_upload_ops cxl_memdev_fw_ops = {
938 	.prepare = cxl_fw_prepare,
939 	.write = cxl_fw_write,
940 	.poll_complete = cxl_fw_poll_complete,
941 	.cancel = cxl_fw_cancel,
942 	.cleanup = cxl_fw_cleanup,
943 };
944 
945 static void devm_cxl_remove_fw_upload(void *fwl)
946 {
947 	firmware_upload_unregister(fwl);
948 }
949 
950 int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
951 {
952 	struct cxl_dev_state *cxlds = &mds->cxlds;
953 	struct device *dev = &cxlds->cxlmd->dev;
954 	struct fw_upload *fwl;
955 	int rc;
956 
957 	if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
958 		return 0;
959 
960 	fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
961 				       &cxl_memdev_fw_ops, mds);
962 	if (IS_ERR(fwl))
963 		return dev_err_probe(dev, PTR_ERR(fwl),
964 				     "Failed to register firmware loader\n");
965 
966 	rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
967 				      fwl);
968 	if (rc)
969 		dev_err(dev,
970 			"Failed to add firmware loader remove action: %d\n",
971 			rc);
972 
973 	return rc;
974 }
975 EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
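/*
 * Registration exposes a firmware-upload interface for the device; per
 * the firmware-class sysfs ABI the image is then driven from userspace
 * via nodes such as (illustrative, assuming the uploader is named after
 * dev_name(dev), e.g. mem0):
 *
 *	/sys/class/firmware/mem0/loading
 *	/sys/class/firmware/mem0/data
 */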
976 
977 static const struct file_operations cxl_memdev_fops = {
978 	.owner = THIS_MODULE,
979 	.unlocked_ioctl = cxl_memdev_ioctl,
980 	.open = cxl_memdev_open,
981 	.release = cxl_memdev_release_file,
982 	.compat_ioctl = compat_ptr_ioctl,
983 	.llseek = noop_llseek,
984 };
985 
986 static void put_sanitize(void *data)
987 {
988 	struct cxl_memdev_state *mds = data;
989 
990 	sysfs_put(mds->security.sanitize_node);
991 }
992 
993 static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
994 {
995 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
996 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
997 	struct device *dev = &cxlmd->dev;
998 	struct kernfs_node *sec;
999 
1000 	sec = sysfs_get_dirent(dev->kobj.sd, "security");
1001 	if (!sec) {
1002 		dev_err(dev, "sysfs_get_dirent 'security' failed\n");
1003 		return -ENODEV;
1004 	}
1005 	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
1006 	sysfs_put(sec);
1007 	if (!mds->security.sanitize_node) {
1008 		dev_err(dev, "sysfs_get_dirent 'state' failed\n");
1009 		return -ENODEV;
1010 	}
1011 
1012 	return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
1013 }
1014 
1015 struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
1016 {
1017 	struct cxl_memdev *cxlmd;
1018 	struct device *dev;
1019 	struct cdev *cdev;
1020 	int rc;
1021 
1022 	cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
1023 	if (IS_ERR(cxlmd))
1024 		return cxlmd;
1025 
1026 	dev = &cxlmd->dev;
1027 	rc = dev_set_name(dev, "mem%d", cxlmd->id);
1028 	if (rc)
1029 		goto err;
1030 
1031 	/*
1032 	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
1033 	 * needed as this is ordered with cdev_add() publishing the device.
1034 	 */
1035 	cxlmd->cxlds = cxlds;
1036 	cxlds->cxlmd = cxlmd;
1037 
1038 	cdev = &cxlmd->cdev;
1039 	rc = cdev_device_add(cdev, dev);
1040 	if (rc)
1041 		goto err;
1042 
1043 	rc = cxl_memdev_security_init(cxlmd);
1044 	if (rc)
1045 		goto err;
1046 
1047 	rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
1048 	if (rc)
1049 		return ERR_PTR(rc);
1050 	return cxlmd;
1051 
1052 err:
1053 	/*
1054 	 * The cdev was briefly live, shutdown any ioctl operations that
1055 	 * saw that state.
1056 	 */
1057 	cxl_memdev_shutdown(dev);
1058 	put_device(dev);
1059 	return ERR_PTR(rc);
1060 }
1061 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
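/*
 * Minimal caller sketch, modeled on the cxl_pci probe path (assumes a
 * fully initialized cxl_dev_state and its cxl_memdev_state):
 *
 *	cxlmd = devm_cxl_add_memdev(cxlds);
 *	if (IS_ERR(cxlmd))
 *		return PTR_ERR(cxlmd);
 *
 *	rc = cxl_memdev_setup_fw_upload(mds);
 *	if (rc)
 *		return rc;
 */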
1062 
1063 __init int cxl_memdev_init(void)
1064 {
1065 	dev_t devt;
1066 	int rc;
1067 
1068 	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
1069 	if (rc)
1070 		return rc;
1071 
1072 	cxl_mem_major = MAJOR(devt);
1073 
1074 	return 0;
1075 }
1076 
1077 void cxl_memdev_exit(void)
1078 {
1079 	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
1080 }
1081