/* xref: /openbmc/linux/drivers/nvdimm/security.c (revision 4f727ece) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"

#define NVDIMM_BASE_KEY		0
#define NVDIMM_NEW_KEY		1

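/*
 * When true, a passphrase found in the kernel keyring is re-validated
 * against the DIMM hardware at unlock time (see nvdimm_key_revalidate()).
 * This file is built into libnvdimm, so the parameter is typically set as
 * libnvdimm.key_revalidate=<bool> on the kernel command line.
 */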
static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");

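/* Default all-zeros passphrase, used when no key material is supplied */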
static const char zero_key[NVDIMM_PASSPHRASE_LEN];

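/* Return the decrypted payload; the caller must hold key->sem for read */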
static void *key_data(struct key *key)
{
	struct encrypted_key_payload *epayload = dereference_key_locked(key);

	lockdep_assert_held_read(&key->sem);

	return epayload->decrypted_data;
}

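/* Drop the read lock and the reference taken by the key lookup helpers */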
static void nvdimm_put_key(struct key *key)
{
	if (!key)
		return;

	up_read(&key->sem);
	key_put(key);
}

/*
 * Retrieve the kernel key for the DIMM, requesting it from user space if
 * necessary. The returned key is held for read and must be released with
 * nvdimm_put_key() before its use goes out of scope.
 */
static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
{
	struct key *key = NULL;
	static const char NVDIMM_PREFIX[] = "nvdimm:";
	char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
	struct device *dev = &nvdimm->dev;

	sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
	key = request_key(&key_type_encrypted, desc, "");
	if (IS_ERR(key)) {
		if (PTR_ERR(key) == -ENOKEY)
			dev_dbg(dev, "request_key() found no key\n");
		else
			dev_dbg(dev, "request_key() upcall failed\n");
		key = NULL;
	} else {
		struct encrypted_key_payload *epayload;

		down_read(&key->sem);
		epayload = dereference_key_locked(key);
		if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
			up_read(&key->sem);
			key_put(key);
			key = NULL;
		}
	}

	return key;
}

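/*
 * Return the payload of the kernel key for this DIMM, or zero_key when no
 * key is found. *key is set so the caller can release it afterwards with
 * nvdimm_put_key() (a no-op for the zero_key case).
 */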
static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
		struct key **key)
{
	*key = nvdimm_request_key(nvdimm);
	if (!*key)
		return zero_key;

	return key_data(*key);
}

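/*
 * Look up a user-provided key by serial number, verify that it is an
 * encrypted key with an NVDIMM_PASSPHRASE_LEN payload, and return it held
 * for read. @subclass is the lockdep nesting level for down_read_nested()
 * so that the current and new keys can be held simultaneously (see
 * nvdimm_security_update()).
 */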
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
		key_serial_t id, int subclass)
{
	key_ref_t keyref;
	struct key *key;
	struct encrypted_key_payload *epayload;
	struct device *dev = &nvdimm->dev;

	keyref = lookup_user_key(id, 0, 0);
	if (IS_ERR(keyref))
		return NULL;

	key = key_ref_to_ptr(keyref);
	if (key->type != &key_type_encrypted) {
		key_put(key);
		return NULL;
	}

	dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));

	down_read_nested(&key->sem, subclass);
	epayload = dereference_key_locked(key);
	if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
		up_read(&key->sem);
		key_put(key);
		key = NULL;
	}
	return key;
}

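/*
 * Resolve a user key id to its payload. A key id of 0 selects the zero
 * passphrase for the base key slot and is rejected for the new-key slot.
 */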
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
		key_serial_t id, int subclass, struct key **key)
{
	*key = NULL;
	if (id == 0) {
		if (subclass == NVDIMM_BASE_KEY)
			return zero_key;
		else
			return NULL;
	}

	*key = nvdimm_lookup_user_key(nvdimm, id, subclass);
	if (!*key)
		return NULL;

	return key_data(*key);
}

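/*
 * Verify that the passphrase from the kernel keyring matches the DIMM by
 * issuing a change_key with the same passphrase as both old and new key.
 */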
static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
	struct key *key;
	int rc;
	const void *data;

	if (!nvdimm->sec.ops->change_key)
		return -EOPNOTSUPP;

	data = nvdimm_get_key_payload(nvdimm, &key);

	/*
	 * Send the same key to the hardware as new and old key to
	 * verify that the key is good.
	 */
	rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
	if (rc < 0) {
		nvdimm_put_key(key);
		return rc;
	}

	nvdimm_put_key(key);
	nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
	return 0;
}

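/*
 * Unlock the DIMM with the passphrase from the kernel keyring, or
 * revalidate the key if the DIMM is already unlocked. Caller must hold
 * the nvdimm_bus reconfig_mutex.
 */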
static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	const void *data;
	int rc;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
			|| nvdimm->sec.state < 0)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	/*
	 * If the pre-OS environment has unlocked the DIMM, attempt to send
	 * the key from request_key() to the hardware for verification.
	 * Failure to revalidate the key against the hardware results in a
	 * freeze of the security configuration. I.e. if the OS does not
	 * have the key, security is being managed pre-OS.
	 */
	if (nvdimm->sec.state == NVDIMM_SECURITY_UNLOCKED) {
		if (!key_revalidate)
			return 0;

		return nvdimm_key_revalidate(nvdimm);
	} else
		data = nvdimm_get_key_payload(nvdimm, &key);

	rc = nvdimm->sec.ops->unlock(nvdimm, data);
	dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
	return rc;
}

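/* Bus-locked wrapper around __nvdimm_security_unlock() */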
int nvdimm_security_unlock(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int rc;

	nvdimm_bus_lock(dev);
	rc = __nvdimm_security_unlock(nvdimm);
	nvdimm_bus_unlock(dev);
	return rc;
}

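/*
 * Disable the security passphrase on the DIMM using the user key
 * identified by @keyid (0 selects the zero key). Caller must hold the
 * nvdimm_bus reconfig_mutex.
 */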
int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
			|| nvdimm->sec.state < 0)
		return -EOPNOTSUPP;

	if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
		dev_dbg(dev, "Incorrect security state: %d\n",
				nvdimm->sec.state);
		return -EIO;
	}

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->disable(nvdimm, data);
	dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
	return rc;
}

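/*
 * Change the user or master passphrase. @keyid selects the current key
 * (0 means the zero key) and @new_keyid must reference a real key for the
 * replacement. Caller must hold the nvdimm_bus reconfig_mutex.
 */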
int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
		unsigned int new_keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key, *newkey;
	int rc;
	const void *data, *newdata;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
			|| nvdimm->sec.state < 0)
		return -EOPNOTSUPP;

	if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
		dev_dbg(dev, "Incorrect security state: %d\n",
				nvdimm->sec.state);
		return -EIO;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
			NVDIMM_NEW_KEY, &newkey);
	if (!newdata) {
		nvdimm_put_key(key);
		return -ENOKEY;
	}

	rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
	dev_dbg(dev, "key: %d %d update%s: %s\n",
			key_serial(key), key_serial(newkey),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(newkey);
	nvdimm_put_key(key);
	if (pass_type == NVDIMM_MASTER)
		nvdimm->sec.ext_state = nvdimm_security_state(nvdimm,
				NVDIMM_MASTER);
	else
		nvdimm->sec.state = nvdimm_security_state(nvdimm,
				NVDIMM_USER);
	return rc;
}

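/*
 * Secure erase the DIMM with the given passphrase. A master-passphrase
 * erase additionally requires the master security state to be unlocked.
 * Caller must hold the nvdimm_bus reconfig_mutex.
 */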
int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
			|| nvdimm->sec.state < 0)
		return -EOPNOTSUPP;

	if (atomic_read(&nvdimm->busy)) {
		dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
		return -EBUSY;
	}

	if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
		dev_dbg(dev, "Incorrect security state: %d\n",
				nvdimm->sec.state);
		return -EIO;
	}

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	if (nvdimm->sec.ext_state != NVDIMM_SECURITY_UNLOCKED
			&& pass_type == NVDIMM_MASTER) {
		dev_dbg(dev,
			"Attempt to secure erase in wrong master state.\n");
		return -EOPNOTSUPP;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
	dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
	return rc;
}

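/*
 * Start an overwrite of the DIMM media and queue the delayed work that
 * polls for completion. Requires the DIMM to be idle and its driver to be
 * bound. Caller must hold the nvdimm_bus reconfig_mutex.
 */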
int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
			|| nvdimm->sec.state < 0)
		return -EOPNOTSUPP;

	if (atomic_read(&nvdimm->busy)) {
		dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
		return -EBUSY;
	}

	if (dev->driver == NULL) {
		dev_dbg(dev, "Unable to overwrite while DIMM not enabled.\n");
		return -EINVAL;
	}

	if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
		dev_dbg(dev, "Incorrect security state: %d\n",
				nvdimm->sec.state);
		return -EIO;
	}

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->overwrite(nvdimm, data);
	dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	if (rc == 0) {
		set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
		set_bit(NDD_WORK_PENDING, &nvdimm->flags);
		nvdimm->sec.state = NVDIMM_SECURITY_OVERWRITE;
		/*
		 * Make sure we don't lose device while doing overwrite
		 * query.
		 */
		get_device(dev);
		queue_delayed_work(system_wq, &nvdimm->dwork, 0);
	}

	return rc;
}

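/*
 * Poll the overwrite status. While the hardware reports -EBUSY the work
 * re-queues itself, backing off by 10 seconds per poll up to roughly 15
 * minutes. On completion it notifies sysfs, clears the overwrite and
 * work-pending flags, and drops the device reference taken at submission.
 */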
void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
	int rc;
	unsigned int tmo;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	/*
	 * Abort and release device if we no longer have the overwrite
	 * flag set. It means the work has been canceled.
	 */
	if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
		return;

	tmo = nvdimm->sec.overwrite_tmo;

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
			|| nvdimm->sec.state < 0)
		return;

	rc = nvdimm->sec.ops->query_overwrite(nvdimm);
	if (rc == -EBUSY) {
		/* setup delayed work again */
		tmo += 10;
		queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
		nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
		return;
	}

	if (rc < 0)
		dev_dbg(&nvdimm->dev, "overwrite failed\n");
	else
		dev_dbg(&nvdimm->dev, "overwrite completed\n");

	if (nvdimm->sec.overwrite_state)
		sysfs_notify_dirent(nvdimm->sec.overwrite_state);
	nvdimm->sec.overwrite_tmo = 0;
	clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
	clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
	put_device(&nvdimm->dev);
	nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER);
}

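/* Delayed-work entry point: take the bus lock and poll the overwrite status */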
void nvdimm_security_overwrite_query(struct work_struct *work)
{
	struct nvdimm *nvdimm =
		container_of(work, typeof(*nvdimm), dwork.work);

	nvdimm_bus_lock(&nvdimm->dev);
	__nvdimm_security_overwrite_query(nvdimm);
	nvdimm_bus_unlock(&nvdimm->dev);
}