/* xref: /openbmc/linux/drivers/nvdimm/security.c (revision ccc319dc) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"

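/*
 * Lockdep subclasses for the key semaphores: security_update() holds the
 * current (base) passphrase key and the new passphrase key at the same time.
 */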
#define NVDIMM_BASE_KEY		0
#define NVDIMM_NEW_KEY		1

static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");

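/* All-zeros passphrase payload used when no kernel key is available. */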
static const char zero_key[NVDIMM_PASSPHRASE_LEN];

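/* Return the decrypted passphrase data; key->sem must be held for read. */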
static void *key_data(struct key *key)
{
	struct encrypted_key_payload *epayload = dereference_key_locked(key);

	lockdep_assert_held_read(&key->sem);

	return epayload->decrypted_data;
}

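/*
 * Drop the read lock and reference taken by nvdimm_request_key() or
 * nvdimm_lookup_user_key(). Tolerates a NULL key.
 */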
static void nvdimm_put_key(struct key *key)
{
	if (!key)
		return;

	up_read(&key->sem);
	key_put(key);
}

/*
 * Retrieve the kernel key for the DIMM, requesting it from user space
 * if necessary. Returns a key held for read; the caller must release it
 * with nvdimm_put_key() once the payload is no longer needed.
 */
static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
{
	struct key *key = NULL;
	static const char NVDIMM_PREFIX[] = "nvdimm:";
	char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
	struct device *dev = &nvdimm->dev;

	sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
	key = request_key(&key_type_encrypted, desc, "");
	if (IS_ERR(key)) {
		if (PTR_ERR(key) == -ENOKEY)
			dev_dbg(dev, "request_key() found no key\n");
		else
			dev_dbg(dev, "request_key() upcall failed\n");
		key = NULL;
	} else {
		struct encrypted_key_payload *epayload;

		down_read(&key->sem);
		epayload = dereference_key_locked(key);
		if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
			up_read(&key->sem);
			key_put(key);
			key = NULL;
		}
	}

	return key;
}

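/*
 * Get the passphrase payload for the DIMM's kernel key, or zero_key when
 * no key is found. *key is set to NULL in the zero_key case, so a later
 * nvdimm_put_key(*key) is always safe.
 */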
static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
		struct key **key)
{
	*key = nvdimm_request_key(nvdimm);
	if (!*key)
		return zero_key;

	return key_data(*key);
}

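/*
 * Look up an encrypted key by its user space key ID and validate the
 * payload length. Returns the key held for read, or NULL on any failure.
 */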
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
		key_serial_t id, int subclass)
{
	key_ref_t keyref;
	struct key *key;
	struct encrypted_key_payload *epayload;
	struct device *dev = &nvdimm->dev;

	keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
	if (IS_ERR(keyref))
		return NULL;

	key = key_ref_to_ptr(keyref);
	if (key->type != &key_type_encrypted) {
		key_put(key);
		return NULL;
	}

	dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));

	down_read_nested(&key->sem, subclass);
	epayload = dereference_key_locked(key);
	if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
		up_read(&key->sem);
		key_put(key);
		key = NULL;
	}
	return key;
}

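/*
 * Resolve a user-supplied key ID to its payload. An ID of 0 means "no
 * passphrase": zero_key is returned for the current (base) passphrase,
 * while a new passphrase of 0 is rejected.
 */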
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
		key_serial_t id, int subclass, struct key **key)
{
	*key = NULL;
	if (id == 0) {
		if (subclass == NVDIMM_BASE_KEY)
			return zero_key;
		else
			return NULL;
	}

	*key = nvdimm_lookup_user_key(nvdimm, id, subclass);
	if (!*key)
		return NULL;

	return key_data(*key);
}

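/*
 * Verify that the kernel key matches the DIMM's passphrase by issuing a
 * no-op key change (old key == new key) against the hardware.
 */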
static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
	struct key *key;
	int rc;
	const void *data;

	if (!nvdimm->sec.ops->change_key)
		return -EOPNOTSUPP;

	data = nvdimm_get_key_payload(nvdimm, &key);

	/*
	 * Send the same key to the hardware as both the old and the new
	 * key to verify that the key is good.
	 */
	rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
	if (rc < 0) {
		nvdimm_put_key(key);
		return rc;
	}

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return 0;
}

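/*
 * Unlock the DIMM with the kernel key, or just revalidate the key when the
 * pre-OS environment has already unlocked the DIMM. Expects the bus lock
 * to be held by the caller.
 */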
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	const void *data;
	int rc;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
			|| !nvdimm->sec.flags)
		return -EIO;

	/* No need to go further if security is disabled */
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return 0;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	/*
	 * If the pre-OS has unlocked the DIMM, attempt to send the key
	 * from request_key() to the hardware for verification.  Failure
	 * to revalidate the key against the hardware results in a
	 * freeze of the security configuration. I.e. if the OS does not
	 * have the key, security is being managed pre-OS.
	 */
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
		if (!key_revalidate)
			return 0;

		return nvdimm_key_revalidate(nvdimm);
	} else
		data = nvdimm_get_key_payload(nvdimm, &key);

	rc = nvdimm->sec.ops->unlock(nvdimm, data);
	dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

int nvdimm_security_unlock(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int rc;

	nvdimm_bus_lock(dev);
	rc = __nvdimm_security_unlock(nvdimm);
	nvdimm_bus_unlock(dev);
	return rc;
}

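/*
 * Common precondition checks for security commands: reject requests while
 * the security state is frozen or while an overwrite is in progress.
 */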
static int check_security_state(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;

	if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
		dev_dbg(dev, "Incorrect security state: %#lx\n",
				nvdimm->sec.flags);
		return -EIO;
	}

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	return 0;
}

static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->disable(nvdimm, data);
	dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
		unsigned int new_keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key, *newkey;
	int rc;
	const void *data, *newdata;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
			NVDIMM_NEW_KEY, &newkey);
	if (!newdata) {
		nvdimm_put_key(key);
		return -ENOKEY;
	}

	rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
	dev_dbg(dev, "key: %d %d update%s: %s\n",
			key_serial(key), key_serial(newkey),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(newkey);
	nvdimm_put_key(key);
	if (pass_type == NVDIMM_MASTER)
		nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
				NVDIMM_MASTER);
	else
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
				NVDIMM_USER);
	return rc;
}

static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
			&& pass_type == NVDIMM_MASTER) {
		dev_dbg(dev,
			"Attempt to secure erase in wrong master state.\n");
		return -EOPNOTSUPP;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
	dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->overwrite(nvdimm, data);
	dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	if (rc == 0) {
		set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
		set_bit(NDD_WORK_PENDING, &nvdimm->flags);
		set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
		/*
		 * Make sure we don't lose the device while the overwrite
		 * query work is pending.
		 */
		get_device(dev);
		queue_delayed_work(system_wq, &nvdimm->dwork, 0);
	}

	return rc;
}

void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
	int rc;
	unsigned int tmo;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	/*
	 * Abort and release the device if the overwrite flag is no longer
	 * set. It means the work has been canceled.
	 */
	if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
		return;

	tmo = nvdimm->sec.overwrite_tmo;

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
			|| !nvdimm->sec.flags)
		return;

	rc = nvdimm->sec.ops->query_overwrite(nvdimm);
	if (rc == -EBUSY) {
		/* set up the delayed work again */
		tmo += 10;
		queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
		nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
		return;
	}

	if (rc < 0)
		dev_dbg(&nvdimm->dev, "overwrite failed\n");
	else
		dev_dbg(&nvdimm->dev, "overwrite completed\n");

	/*
	 * Mark the overwrite work done and update the dimm security flags,
	 * then send a sysfs event notification to wake up userspace poll
	 * threads so they can pick up the changed state.
	 */
	nvdimm->sec.overwrite_tmo = 0;
	clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
	clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	if (nvdimm->sec.overwrite_state)
		sysfs_notify_dirent(nvdimm->sec.overwrite_state);
	put_device(&nvdimm->dev);
}

void nvdimm_security_overwrite_query(struct work_struct *work)
{
	struct nvdimm *nvdimm =
		container_of(work, typeof(*nvdimm), dwork.work);

	nvdimm_bus_lock(&nvdimm->dev);
	__nvdimm_security_overwrite_query(nvdimm);
	nvdimm_bus_unlock(&nvdimm->dev);
}

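/*
 * Table of sysfs security commands and the number of whitespace-separated
 * tokens each expects (the command itself plus key IDs). The C() macro is
 * expanded twice: once for the op-id enum and once for the name/arg table.
 */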
#define OPS							\
	C( OP_FREEZE,		"freeze",		1),	\
	C( OP_DISABLE,		"disable",		2),	\
	C( OP_UPDATE,		"update",		3),	\
	C( OP_ERASE,		"erase",		2),	\
	C( OP_OVERWRITE,	"overwrite",		2),	\
	C( OP_MASTER_UPDATE,	"master_update",	3),	\
	C( OP_MASTER_ERASE,	"master_erase",		2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
#undef C
#define C(a, b, c) { b, c }
static struct {
	const char *name;
	int args;
} ops[] = { OPS };
#undef C

#define SEC_CMD_SIZE 32
#define KEY_ID_SIZE 10

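/*
 * Parse and dispatch a security command written to the DIMM's sysfs
 * "security" attribute. The expected format is:
 *
 *	<op> [<keyid> [<new keyid>]]
 *
 * e.g. "update 12345 67890" to change the user passphrase, where the key
 * IDs name encrypted-type kernel keys (see nvdimm_lookup_user_key()).
 */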
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	ssize_t rc;
	char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
		nkeystr[KEY_ID_SIZE+1];
	unsigned int key, newkey;
	int i;

	rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s",
			cmd, keystr, nkeystr);
	if (rc < 1)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(ops); i++)
		if (sysfs_streq(cmd, ops[i].name))
			break;
	if (i >= ARRAY_SIZE(ops))
		return -EINVAL;
	if (ops[i].args > 1)
		rc = kstrtouint(keystr, 0, &key);
	if (rc >= 0 && ops[i].args > 2)
		rc = kstrtouint(nkeystr, 0, &newkey);
	if (rc < 0)
		return rc;

	if (i == OP_FREEZE) {
		dev_dbg(dev, "freeze\n");
		rc = nvdimm_security_freeze(nvdimm);
	} else if (i == OP_DISABLE) {
		dev_dbg(dev, "disable %u\n", key);
		rc = security_disable(nvdimm, key);
	} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
		dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
		rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
		dev_dbg(dev, "%s %u\n", ops[i].name, key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_erase(nvdimm, key, i == OP_ERASE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_OVERWRITE) {
		dev_dbg(dev, "overwrite %u\n", key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_overwrite(nvdimm, key);
	} else
		return -EINVAL;

	if (rc == 0)
		rc = len;
	return rc;
}
554