xref: /openbmc/linux/drivers/acpi/nfit/intel.c (revision cbdf59ad)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
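/*
 * NVDIMM_FAMILY_INTEL security DSM implementation.  Each helper below wraps
 * one Intel security sub-command in an ND_CMD_CALL package and is exported
 * to the libnvdimm core through the struct nvdimm_security_ops table at the
 * bottom of this file.
 */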
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <asm/smp.h>
#include "intel.h"
#include "nfit.h"

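/*
 * Query the security state of the DIMM via the "get security state" DSM and
 * translate the Intel state/extended_state bits into the generic
 * enum nvdimm_security_state used by the libnvdimm core.
 */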
static enum nvdimm_security_state intel_security_state(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return -ENXIO;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return NVDIMM_SECURITY_OVERWRITE;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	if (nd_cmd.cmd.status)
		return -EIO;

	/* check whether security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			return NVDIMM_SECURITY_UNLOCKED;
		else if (nd_cmd.cmd.extended_state &
				ND_INTEL_SEC_ESTATE_PLIMIT)
			return NVDIMM_SECURITY_FROZEN;
	} else {
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
			return -ENXIO;
		else if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
			if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
				return NVDIMM_SECURITY_LOCKED;
			else if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN
					|| nd_cmd.cmd.state &
					ND_INTEL_SEC_STATE_PLIMIT)
				return NVDIMM_SECURITY_FROZEN;
			else
				return NVDIMM_SECURITY_UNLOCKED;
		}
	}

	/* this should cover master security disabled as well */
	return NVDIMM_SECURITY_DISABLED;
}

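/* Issue the Intel "freeze lock" DSM to block further security state changes. */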
static int intel_security_freeze(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_freeze_lock cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	if (nd_cmd.cmd.status)
		return -EIO;
	return 0;
}

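/*
 * Set or update the user or master passphrase.  The passphrase type selects
 * between the set-passphrase and set-master-passphrase DSMs; both take the
 * old and the new passphrase in the input payload.
 */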
static int intel_security_change_key(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *old_data,
		const struct nvdimm_key_data *new_data,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
		NVDIMM_INTEL_SET_PASSPHRASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_set_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};
	int rc;

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.old_pass, old_data->data,
			sizeof(nd_cmd.cmd.old_pass));
	memcpy(nd_cmd.cmd.new_pass, new_data->data,
			sizeof(nd_cmd.cmd.new_pass));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -EIO;
	}
}

static void nvdimm_invalidate_cache(void);

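/*
 * Unlock the DIMM with the user passphrase.  On success the CPU caches are
 * invalidated, since any lines cached while the DIMM was locked are stale.
 */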
static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_unlock_unit cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();

	return 0;
}

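/* Disable the user passphrase, i.e. turn security off for this DIMM. */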
static int intel_security_disable(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_disable_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

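/*
 * Secure-erase the DIMM with the given passphrase.  The passphrase type
 * selects between the user and master secure-erase DSMs, and CPU caches are
 * invalidated on both sides of the operation.
 */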
static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all caches before we erase the DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	/* DIMM erased, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();
	return 0;
}

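/*
 * Poll for completion of a previously started overwrite.  Returns -EBUSY
 * while the overwrite is still in progress and invalidates CPU caches once
 * it has finished.
 */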
static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_query_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
		return -EBUSY;
	default:
		return -ENXIO;
	}

	/* flush all caches before we make the nvdimms available */
	nvdimm_invalidate_cache();
	return 0;
}

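/*
 * Kick off an overwrite of the DIMM media.  Completion is asynchronous and
 * is detected by polling intel_security_query_overwrite().
 */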
static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *nkey)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all caches before we start the overwrite */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
		return -ENOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}
}

/*
 * TODO: define a cross arch wbinvd equivalent when/if
 * NVDIMM_FAMILY_INTEL command support arrives on another arch.
 */
#ifdef CONFIG_X86
static void nvdimm_invalidate_cache(void)
{
	wbinvd_on_all_cpus();
}
#else
static void nvdimm_invalidate_cache(void)
{
	WARN_ON_ONCE("cache invalidation required after unlock\n");
}
#endif

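/*
 * Operations exposed to the libnvdimm core.  The unlock and data-destructive
 * paths are only wired up on x86, where the wbinvd-based cache invalidation
 * above is available.
 */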
static const struct nvdimm_security_ops __intel_security_ops = {
	.state = intel_security_state,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
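
/*
 * Illustrative sketch, not part of the original file: roughly how the NFIT
 * core is expected to pick up this ops table when it registers a DIMM.  The
 * acpi_nfit_get_security_ops() helper name is taken from intel.h of this
 * era; treat the exact call site as an assumption:
 *
 *	const struct nvdimm_security_ops *security_ops =
 *		acpi_nfit_get_security_ops(nfit_mem->family);
 *
 * It returns intel_security_ops for NVDIMM_FAMILY_INTEL (NULL otherwise) and
 * is then handed to __nvdimm_create() so the libnvdimm core can drive
 * unlock, erase, and overwrite through the helpers in this file.
 */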