xref: /openbmc/linux/drivers/nvdimm/security.c (revision 75cc1867)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"

#define NVDIMM_BASE_KEY		0
#define NVDIMM_NEW_KEY		1

static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
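/*
 * key_revalidate is read-only at runtime (perms 0444), so it is normally
 * set at boot or module load, e.g. "libnvdimm.key_revalidate=0" on the
 * kernel command line (the "libnvdimm." prefix assumes this file is built
 * into the libnvdimm module).
 */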

static const char zero_key[NVDIMM_PASSPHRASE_LEN];
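/*
 * zero_key (above) is the default all-zeros passphrase payload: it is used
 * when no kernel key is registered for the DIMM and when userspace passes
 * a key id of 0 for the current/base passphrase.
 */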

static void *key_data(struct key *key)
{
	struct encrypted_key_payload *epayload = dereference_key_locked(key);

	lockdep_assert_held_read(&key->sem);

	return epayload->decrypted_data;
}

static void nvdimm_put_key(struct key *key)
{
	if (!key)
		return;

	up_read(&key->sem);
	key_put(key);
}

/*
 * Retrieve the kernel key for the DIMM, upcalling to user space if
 * necessary. Returns a key held for read; the caller must release it
 * with nvdimm_put_key() before the payload goes out of scope.
 */
static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
{
	struct key *key = NULL;
	static const char NVDIMM_PREFIX[] = "nvdimm:";
	char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
	struct device *dev = &nvdimm->dev;

	sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
	key = request_key(&key_type_encrypted, desc, "");
	if (IS_ERR(key)) {
		if (PTR_ERR(key) == -ENOKEY)
			dev_dbg(dev, "request_key() found no key\n");
		else
			dev_dbg(dev, "request_key() upcall failed\n");
		key = NULL;
	} else {
		struct encrypted_key_payload *epayload;

		down_read(&key->sem);
		epayload = dereference_key_locked(key);
		if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
			up_read(&key->sem);
			key_put(key);
			key = NULL;
		}
	}

	return key;
}

static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
		struct key **key)
{
	*key = nvdimm_request_key(nvdimm);
	if (!*key)
		return zero_key;

	return key_data(*key);
}

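/*
 * Resolve a user-supplied key id (from sysfs) to an encrypted-key payload.
 * The key semaphore is taken with down_read_nested() using the caller's
 * subclass (NVDIMM_BASE_KEY or NVDIMM_NEW_KEY) so that security_update()
 * can hold both the current and the new key locked at the same time
 * without triggering a false-positive lockdep report.
 */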
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
		key_serial_t id, int subclass)
{
	key_ref_t keyref;
	struct key *key;
	struct encrypted_key_payload *epayload;
	struct device *dev = &nvdimm->dev;

	keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
	if (IS_ERR(keyref))
		return NULL;

	key = key_ref_to_ptr(keyref);
	if (key->type != &key_type_encrypted) {
		key_put(key);
		return NULL;
	}

	dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));

	down_read_nested(&key->sem, subclass);
	epayload = dereference_key_locked(key);
	if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
		up_read(&key->sem);
		key_put(key);
		key = NULL;
	}
	return key;
}

static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
		key_serial_t id, int subclass, struct key **key)
{
	*key = NULL;
	if (id == 0) {
		if (subclass == NVDIMM_BASE_KEY)
			return zero_key;
		else
			return NULL;
	}

	*key = nvdimm_lookup_user_key(nvdimm, id, subclass);
	if (!*key)
		return NULL;

	return key_data(*key);
}

static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
	struct key *key;
	int rc;
	const void *data;

	if (!nvdimm->sec.ops->change_key)
		return -EOPNOTSUPP;

	data = nvdimm_get_key_payload(nvdimm, &key);

	/*
	 * Send the same key to the hardware as new and old key to
	 * verify that the key is good.
	 */
	rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
	if (rc < 0) {
		nvdimm_put_key(key);
		return rc;
	}

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return 0;
}

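/*
 * Attempt to unlock the DIMM with the kernel key associated with it. If
 * the pre-OS environment already unlocked the DIMM, this only revalidates
 * the key (unless key_revalidate is disabled); otherwise the key payload
 * is handed to the hardware unlock op.
 */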
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	const void *data;
	int rc;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
			|| !nvdimm->sec.flags)
		return -EIO;

	/* cxl_test needs this to pre-populate the security state */
	if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	/* No need to go further if security is disabled */
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return 0;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	/*
	 * If the pre-OS has unlocked the DIMM, attempt to send the key
	 * from request_key() to the hardware for verification.  Failure
	 * to revalidate the key against the hardware results in a
	 * freeze of the security configuration. I.e. if the OS does not
	 * have the key, security is being managed pre-OS.
	 */
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
		if (!key_revalidate)
			return 0;

		return nvdimm_key_revalidate(nvdimm);
	} else
		data = nvdimm_get_key_payload(nvdimm, &key);

	rc = nvdimm->sec.ops->unlock(nvdimm, data);
	dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");
	if (rc == 0)
		set_bit(NDD_INCOHERENT, &nvdimm->flags);

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

int nvdimm_security_unlock(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int rc;

	nvdimm_bus_lock(dev);
	rc = __nvdimm_security_unlock(nvdimm);
	nvdimm_bus_unlock(dev);
	return rc;
}

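/*
 * Common precondition check for key operations: refuse to proceed while
 * the security state is frozen or while an overwrite is still in flight.
 */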
static int check_security_state(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;

	if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
		dev_dbg(dev, "Incorrect security state: %#lx\n",
				nvdimm->sec.flags);
		return -EIO;
	}

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	return 0;
}

static int security_disable(struct nvdimm *nvdimm, unsigned int keyid,
			    enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	if (pass_type == NVDIMM_USER && !nvdimm->sec.ops->disable)
		return -EOPNOTSUPP;

	if (pass_type == NVDIMM_MASTER && !nvdimm->sec.ops->disable_master)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	if (pass_type == NVDIMM_MASTER) {
		rc = nvdimm->sec.ops->disable_master(nvdimm, data);
		dev_dbg(dev, "key: %d disable_master: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");
	} else {
		rc = nvdimm->sec.ops->disable(nvdimm, data);
		dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");
	}

	nvdimm_put_key(key);
	if (pass_type == NVDIMM_MASTER)
		nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	else
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
		unsigned int new_keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key, *newkey;
	int rc;
	const void *data, *newdata;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
			NVDIMM_NEW_KEY, &newkey);
	if (!newdata) {
		nvdimm_put_key(key);
		return -ENOKEY;
	}

	rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
	dev_dbg(dev, "key: %d %d update%s: %s\n",
			key_serial(key), key_serial(newkey),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(newkey);
	nvdimm_put_key(key);
	if (pass_type == NVDIMM_MASTER)
		nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
				NVDIMM_MASTER);
	else
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
				NVDIMM_USER);
	return rc;
}

static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
			&& pass_type == NVDIMM_MASTER) {
		dev_dbg(dev,
			"Attempt to secure erase in wrong master state.\n");
		return -EOPNOTSUPP;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
	if (rc == 0)
		set_bit(NDD_INCOHERENT, &nvdimm->flags);
	dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->overwrite(nvdimm, data);
	if (rc == 0)
		set_bit(NDD_INCOHERENT, &nvdimm->flags);
	dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	if (rc == 0) {
		set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
		set_bit(NDD_WORK_PENDING, &nvdimm->flags);
		set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
		/*
		 * Make sure we don't lose device while doing overwrite
		 * query.
		 */
		get_device(dev);
		queue_delayed_work(system_wq, &nvdimm->dwork, 0);
	}

	return rc;
}

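/*
 * Body of the delayed work that polls for overwrite completion: as long as
 * the hardware reports -EBUSY the work re-queues itself, stretching the
 * poll interval by 10 seconds per pass and capping it at 15 minutes.
 */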
static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
	int rc;
	unsigned int tmo;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	/*
	 * Abort and release device if we no longer have the overwrite
	 * flag set. It means the work has been canceled.
	 */
	if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
		return;

	tmo = nvdimm->sec.overwrite_tmo;

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
			|| !nvdimm->sec.flags)
		return;

	rc = nvdimm->sec.ops->query_overwrite(nvdimm);
	if (rc == -EBUSY) {
		/* setup delayed work again */
		tmo += 10;
		queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
		nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
		return;
	}

	if (rc < 0)
		dev_dbg(&nvdimm->dev, "overwrite failed\n");
	else
		dev_dbg(&nvdimm->dev, "overwrite completed\n");

	/*
	 * Mark the overwrite work done and update the DIMM security flags,
	 * then send a sysfs notification so that userspace poll threads
	 * pick up the changed state.
	 */
	nvdimm->sec.overwrite_tmo = 0;
	clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
	clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	if (nvdimm->sec.overwrite_state)
		sysfs_notify_dirent(nvdimm->sec.overwrite_state);
	put_device(&nvdimm->dev);
}

void nvdimm_security_overwrite_query(struct work_struct *work)
{
	struct nvdimm *nvdimm =
		container_of(work, typeof(*nvdimm), dwork.work);

	nvdimm_bus_lock(&nvdimm->dev);
	__nvdimm_security_overwrite_query(nvdimm);
	nvdimm_bus_unlock(&nvdimm->dev);
}

#define OPS							\
	C( OP_FREEZE,		"freeze",		1),	\
	C( OP_DISABLE,		"disable",		2),	\
	C( OP_DISABLE_MASTER,	"disable_master",	2),	\
	C( OP_UPDATE,		"update",		3),	\
	C( OP_ERASE,		"erase",		2),	\
	C( OP_OVERWRITE,	"overwrite",		2),	\
	C( OP_MASTER_UPDATE,	"master_update",	3),	\
	C( OP_MASTER_ERASE,	"master_erase",		2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
#undef C
#define C(a, b, c) { b, c }
static struct {
	const char *name;
	int args;
} ops[] = { OPS };
#undef C
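/*
 * The OPS/C() X-macro above is expanded twice: once with C(a, b, c) -> a
 * to build the nvdimmsec_op_ids enum (OP_FREEZE, OP_DISABLE, ...), and
 * once with C(a, b, c) -> { b, c } to build the parallel ops[] table of
 * command names and argument counts, keeping the enum and the table in
 * sync by construction.
 */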

#define SEC_CMD_SIZE 32
#define KEY_ID_SIZE 10

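/*
 * Parse and dispatch a command written to the DIMM security sysfs
 * attribute. The expected format is "<op> [<keyid> [<new keyid>]]", where
 * the key ids name encrypted keys in the kernel keyring; for example
 * (with hypothetical key ids) "update 0 27" or "erase 27" written to
 * /sys/bus/nd/devices/nmemX/security.
 */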
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	ssize_t rc;
	char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
		nkeystr[KEY_ID_SIZE+1];
	unsigned int key, newkey;
	int i;

	rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s",
			cmd, keystr, nkeystr);
	if (rc < 1)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(ops); i++)
		if (sysfs_streq(cmd, ops[i].name))
			break;
	if (i >= ARRAY_SIZE(ops))
		return -EINVAL;
	if (ops[i].args > 1)
		rc = kstrtouint(keystr, 0, &key);
	if (rc >= 0 && ops[i].args > 2)
		rc = kstrtouint(nkeystr, 0, &newkey);
	if (rc < 0)
		return rc;

	if (i == OP_FREEZE) {
		dev_dbg(dev, "freeze\n");
		rc = nvdimm_security_freeze(nvdimm);
	} else if (i == OP_DISABLE) {
		dev_dbg(dev, "disable %u\n", key);
		rc = security_disable(nvdimm, key, NVDIMM_USER);
	} else if (i == OP_DISABLE_MASTER) {
		dev_dbg(dev, "disable_master %u\n", key);
		rc = security_disable(nvdimm, key, NVDIMM_MASTER);
	} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
		dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
		rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
		dev_dbg(dev, "%s %u\n", ops[i].name, key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_erase(nvdimm, key, i == OP_ERASE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_OVERWRITE) {
		dev_dbg(dev, "overwrite %u\n", key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_overwrite(nvdimm, key);
	} else
		return -EINVAL;

	if (rc == 0)
		rc = len;
	return rc;
}