1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4  *
5  * Copyright (C) 2021 Advanced Micro Devices, Inc.
6  *
7  * Author: Brijesh Singh <brijesh.singh@amd.com>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
18 #include <linux/fs.h>
19 #include <crypto/aead.h>
20 #include <linux/scatterlist.h>
21 #include <linux/psp-sev.h>
22 #include <uapi/linux/sev-guest.h>
23 #include <uapi/linux/psp-sev.h>
24 
25 #include <asm/svm.h>
26 #include <asm/sev.h>
27 
28 #include "sev-guest.h"
29 
30 #define DEVICE_NAME	"sev-guest"
31 #define AAD_LEN		48
32 #define MSG_HDR_VER	1
33 
34 #define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
35 #define SNP_REQ_RETRY_DELAY		(2*HZ)
36 
/*
 * AES-GCM transform plus the scratch buffers used when encrypting guest
 * requests and decrypting firmware responses.
 */
struct snp_guest_crypto {
	struct crypto_aead *tfm;	/* "gcm(aes)" transform keyed with the VMPCK */
	u8 *iv, *authtag;		/* scratch buffers: IV and authentication tag */
	int iv_len, a_len;		/* sizes of the iv and authtag buffers */
};
42 
/* Per-device state for the SEV-SNP guest request interface. */
struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;				/* backs the character device node */

	void *certs_data;				/* shared buffer for certificate blobs */
	struct snp_guest_crypto *crypto;		/* AES-GCM context keyed with the VMPCK */
	struct snp_guest_msg *request, *response;	/* shared request/response pages */
	struct snp_secrets_page_layout *layout;		/* mapped SNP secrets page */
	struct snp_req_data input;			/* GPAs passed to the firmware */
	u32 *os_area_msg_seqno;				/* seqno slot in the secrets page OS area */
	u8 *vmpck;					/* active key; NULL once disabled */
};
55 
/* Which of the four VMPCKs (0-3) to use; selectable at module load, read-only. */
static u32 vmpck_id;
module_param(vmpck_id, uint, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");

/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);
62 
63 static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
64 {
65 	char zero_key[VMPCK_KEY_LEN] = {0};
66 
67 	if (snp_dev->vmpck)
68 		return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);
69 
70 	return true;
71 }
72 
73 /*
74  * If an error is received from the host or AMD Secure Processor (ASP) there
75  * are two options. Either retry the exact same encrypted request or discontinue
76  * using the VMPCK.
77  *
78  * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
79  * encrypt the requests. The IV for this scheme is the sequence number. GCM
80  * cannot tolerate IV reuse.
81  *
82  * The ASP FW v1.51 only increments the sequence numbers on a successful
83  * guest<->ASP back and forth and only accepts messages at its exact sequence
84  * number.
85  *
86  * So if the sequence number were to be reused the encryption scheme is
87  * vulnerable. If the sequence number were incremented for a fresh IV the ASP
88  * will reject the request.
89  */
90 static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
91 {
92 	dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
93 		  vmpck_id);
94 	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
95 	snp_dev->vmpck = NULL;
96 }
97 
98 static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
99 {
100 	u64 count;
101 
102 	lockdep_assert_held(&snp_cmd_mutex);
103 
104 	/* Read the current message sequence counter from secrets pages */
105 	count = *snp_dev->os_area_msg_seqno;
106 
107 	return count + 1;
108 }
109 
110 /* Return a non-zero on success */
111 static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
112 {
113 	u64 count = __snp_get_msg_seqno(snp_dev);
114 
115 	/*
116 	 * The message sequence counter for the SNP guest request is a  64-bit
117 	 * value but the version 2 of GHCB specification defines a 32-bit storage
118 	 * for it. If the counter exceeds the 32-bit value then return zero.
119 	 * The caller should check the return value, but if the caller happens to
120 	 * not check the value and use it, then the firmware treats zero as an
121 	 * invalid number and will fail the  message request.
122 	 */
123 	if (count >= UINT_MAX) {
124 		dev_err(snp_dev->dev, "request message sequence counter overflow\n");
125 		return 0;
126 	}
127 
128 	return count;
129 }
130 
131 static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
132 {
133 	/*
134 	 * The counter is also incremented by the PSP, so increment it by 2
135 	 * and save in secrets page.
136 	 */
137 	*snp_dev->os_area_msg_seqno += 2;
138 }
139 
140 static inline struct snp_guest_dev *to_snp_dev(struct file *file)
141 {
142 	struct miscdevice *dev = file->private_data;
143 
144 	return container_of(dev, struct snp_guest_dev, misc);
145 }
146 
147 static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
148 {
149 	struct snp_guest_crypto *crypto;
150 
151 	crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
152 	if (!crypto)
153 		return NULL;
154 
155 	crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
156 	if (IS_ERR(crypto->tfm))
157 		goto e_free;
158 
159 	if (crypto_aead_setkey(crypto->tfm, key, keylen))
160 		goto e_free_crypto;
161 
162 	crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
163 	crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
164 	if (!crypto->iv)
165 		goto e_free_crypto;
166 
167 	if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
168 		if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
169 			dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
170 			goto e_free_iv;
171 		}
172 	}
173 
174 	crypto->a_len = crypto_aead_authsize(crypto->tfm);
175 	crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
176 	if (!crypto->authtag)
177 		goto e_free_iv;
178 
179 	return crypto;
180 
181 e_free_iv:
182 	kfree(crypto->iv);
183 e_free_crypto:
184 	crypto_free_aead(crypto->tfm);
185 e_free:
186 	kfree(crypto);
187 
188 	return NULL;
189 }
190 
191 static void deinit_crypto(struct snp_guest_crypto *crypto)
192 {
193 	crypto_free_aead(crypto->tfm);
194 	kfree(crypto->iv);
195 	kfree(crypto->authtag);
196 	kfree(crypto);
197 }
198 
/*
 * Run the AEAD operation for one guest message. AAD_LEN bytes of message
 * header starting at hdr->algo are authenticated as associated data,
 * src_buf is encrypted (enc=true) or decrypted (enc=false) into dst_buf,
 * and hdr->authtag holds the GCM tag. @len covers the data to process
 * (callers include the authtag length when decrypting). The IV must have
 * been loaded into crypto->iv by the caller before this is invoked.
 * Returns 0 on success or a negative crypto-API error.
 */
static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
			   u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
{
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct scatterlist src[3], dst[3];
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	int ret;

	req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	 * AEAD memory operations:
	 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+
	 * |  msg header      |  plaintext       |  hdr->authtag  |
	 * | bytes 30h - 5Fh  |    or            |                |
	 * |                  |   cipher         |                |
	 * +------------------+------------------+----------------+
	 */
	sg_init_table(src, 3);
	sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&src[1], src_buf, hdr->msg_sz);
	sg_set_buf(&src[2], hdr->authtag, crypto->a_len);

	sg_init_table(dst, 3);
	sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
	sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);

	aead_request_set_ad(req, AAD_LEN);
	aead_request_set_tfm(req, crypto->tfm);
	/* Synchronous wait for completion; no CPU-offload callback needed. */
	aead_request_set_callback(req, 0, crypto_req_done, &wait);

	aead_request_set_crypt(req, src, dst, len, crypto->iv);
	ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);

	aead_request_free(req);
	return ret;
}
240 
241 static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
242 			 void *plaintext, size_t len)
243 {
244 	struct snp_guest_crypto *crypto = snp_dev->crypto;
245 	struct snp_guest_msg_hdr *hdr = &msg->hdr;
246 
247 	memset(crypto->iv, 0, crypto->iv_len);
248 	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
249 
250 	return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
251 }
252 
253 static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
254 		       void *plaintext, size_t len)
255 {
256 	struct snp_guest_crypto *crypto = snp_dev->crypto;
257 	struct snp_guest_msg_hdr *hdr = &msg->hdr;
258 
259 	/* Build IV with response buffer sequence number */
260 	memset(crypto->iv, 0, crypto->iv_len);
261 	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
262 
263 	return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
264 }
265 
266 static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
267 {
268 	struct snp_guest_crypto *crypto = snp_dev->crypto;
269 	struct snp_guest_msg *resp = snp_dev->response;
270 	struct snp_guest_msg *req = snp_dev->request;
271 	struct snp_guest_msg_hdr *req_hdr = &req->hdr;
272 	struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
273 
274 	dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
275 		resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);
276 
277 	/* Verify that the sequence counter is incremented by 1 */
278 	if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
279 		return -EBADMSG;
280 
281 	/* Verify response message type and version number. */
282 	if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
283 	    resp_hdr->msg_version != req_hdr->msg_version)
284 		return -EBADMSG;
285 
286 	/*
287 	 * If the message size is greater than our buffer length then return
288 	 * an error.
289 	 */
290 	if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
291 		return -EBADMSG;
292 
293 	/* Decrypt the payload */
294 	return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
295 }
296 
297 static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
298 			void *payload, size_t sz)
299 {
300 	struct snp_guest_msg *req = snp_dev->request;
301 	struct snp_guest_msg_hdr *hdr = &req->hdr;
302 
303 	memset(req, 0, sizeof(*req));
304 
305 	hdr->algo = SNP_AEAD_AES_256_GCM;
306 	hdr->hdr_version = MSG_HDR_VER;
307 	hdr->hdr_sz = sizeof(*hdr);
308 	hdr->msg_type = type;
309 	hdr->msg_version = version;
310 	hdr->msg_seqno = seqno;
311 	hdr->msg_vmpck = vmpck_id;
312 	hdr->msg_sz = sz;
313 
314 	/* Verify the sequence number is non-zero */
315 	if (!hdr->msg_seqno)
316 		return -ENOSR;
317 
318 	dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
319 		hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
320 
321 	return __enc_payload(snp_dev, req, payload, sz);
322 }
323 
/*
 * Issue the already-encrypted request in snp_dev->input to the firmware and
 * handle the two error cases that would otherwise force IV reuse:
 *
 *  - Certificate buffer too small (-ENOSPC): retry the same message as a
 *    plain (non-extended) guest request so the sequence number still
 *    advances, and report SNP_GUEST_REQ_INVALID_LEN plus the required page
 *    count back to the caller.
 *
 *  - Host throttling (-EAGAIN): retry the identical message for up to
 *    SNP_REQ_MAX_RETRY_DURATION, sleeping SNP_REQ_RETRY_DELAY between
 *    attempts, then give up with -ETIMEDOUT.
 *
 * On return the message sequence number has been advanced and, if @fw_err
 * is non-NULL, it holds the firmware error (or the override value).
 */
static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
{
	unsigned long err = 0xff, override_err = 0;
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = snp_dev->input.data_npages;
		exit_code	= SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err	= SNP_GUEST_REQ_INVALID_LEN;

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(snp_dev);

	if (fw_err)
		*fw_err = override_err ?: err;

	/* Restore the original page count so the caller sees the needed size. */
	if (override_npages)
		snp_dev->input.data_npages = override_npages;

	/*
	 * If an extended guest request was issued and the supplied certificate
	 * buffer was not large enough, a standard guest request was issued to
	 * prevent IV reuse. If the standard request was successful, return -EIO
	 * back to the caller as would have originally been returned.
	 */
	if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
		return -EIO;

	return rc;
}
407 
408 static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
409 				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
410 				u32 resp_sz, __u64 *fw_err)
411 {
412 	u64 seqno;
413 	int rc;
414 
415 	/* Get message sequence and verify that its a non-zero */
416 	seqno = snp_get_msg_seqno(snp_dev);
417 	if (!seqno)
418 		return -EIO;
419 
420 	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
421 
422 	/* Encrypt the userspace provided payload */
423 	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
424 	if (rc)
425 		return rc;
426 
427 	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
428 	if (rc) {
429 		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
430 			return rc;
431 
432 		dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
433 		snp_disable_vmpck(snp_dev);
434 		return rc;
435 	}
436 
437 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
438 	if (rc) {
439 		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
440 		snp_disable_vmpck(snp_dev);
441 		return rc;
442 	}
443 
444 	return 0;
445 }
446 
447 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
448 {
449 	struct snp_guest_crypto *crypto = snp_dev->crypto;
450 	struct snp_report_resp *resp;
451 	struct snp_report_req req;
452 	int rc, resp_len;
453 
454 	lockdep_assert_held(&snp_cmd_mutex);
455 
456 	if (!arg->req_data || !arg->resp_data)
457 		return -EINVAL;
458 
459 	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
460 		return -EFAULT;
461 
462 	/*
463 	 * The intermediate response buffer is used while decrypting the
464 	 * response payload. Make sure that it has enough space to cover the
465 	 * authtag.
466 	 */
467 	resp_len = sizeof(resp->data) + crypto->a_len;
468 	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
469 	if (!resp)
470 		return -ENOMEM;
471 
472 	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
473 				  SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
474 				  resp_len, &arg->fw_err);
475 	if (rc)
476 		goto e_free;
477 
478 	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
479 		rc = -EFAULT;
480 
481 e_free:
482 	kfree(resp);
483 	return rc;
484 }
485 
486 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
487 {
488 	struct snp_guest_crypto *crypto = snp_dev->crypto;
489 	struct snp_derived_key_resp resp = {0};
490 	struct snp_derived_key_req req;
491 	int rc, resp_len;
492 	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
493 	u8 buf[64 + 16];
494 
495 	lockdep_assert_held(&snp_cmd_mutex);
496 
497 	if (!arg->req_data || !arg->resp_data)
498 		return -EINVAL;
499 
500 	/*
501 	 * The intermediate response buffer is used while decrypting the
502 	 * response payload. Make sure that it has enough space to cover the
503 	 * authtag.
504 	 */
505 	resp_len = sizeof(resp.data) + crypto->a_len;
506 	if (sizeof(buf) < resp_len)
507 		return -ENOMEM;
508 
509 	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
510 		return -EFAULT;
511 
512 	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
513 				  SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len,
514 				  &arg->fw_err);
515 	if (rc)
516 		return rc;
517 
518 	memcpy(resp.data, buf, sizeof(resp.data));
519 	if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
520 		rc = -EFAULT;
521 
522 	/* The response buffer contains the sensitive data, explicitly clear it. */
523 	memzero_explicit(buf, sizeof(buf));
524 	memzero_explicit(&resp, sizeof(resp));
525 	return rc;
526 }
527 
/*
 * SNP_GET_EXT_REPORT: fetch an attestation report together with the
 * certificate blob needed to verify it. Certificates arrive through the
 * shared certs_data buffer and are copied to the user-supplied address.
 * If the user's certificate buffer is too small, the required length is
 * written back through req.certs_len and arg->fw_err carries
 * SNP_GUEST_REQ_INVALID_LEN.
 */
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_ext_report_req req;
	struct snp_report_resp *resp;
	int ret, npages = 0, resp_len;

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
		return -EFAULT;

	/* userspace does not want certificate data */
	if (!req.certs_len || !req.certs_address)
		goto cmd;

	/* The certificate buffer must be page-aligned and within the FW limit. */
	if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(req.certs_len, PAGE_SIZE))
		return -EINVAL;

	if (!access_ok((const void __user *)req.certs_address, req.certs_len))
		return -EFAULT;

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(snp_dev->certs_data, 0, req.certs_len);
	npages = req.certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	snp_dev->input.data_npages = npages;
	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg->msg_version,
				   SNP_MSG_REPORT_REQ, &req.data,
				   sizeof(req.data), resp->data, resp_len, &arg->fw_err);

	/* If certs length is invalid then copy the returned length */
	if (arg->fw_err == SNP_GUEST_REQ_INVALID_LEN) {
		req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;

		if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	/* Hand the certificates back only if the user asked for them. */
	if (npages &&
	    copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
			 req.certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
		ret = -EFAULT;

e_free:
	kfree(resp);
	return ret;
}
603 
604 static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
605 {
606 	struct snp_guest_dev *snp_dev = to_snp_dev(file);
607 	void __user *argp = (void __user *)arg;
608 	struct snp_guest_request_ioctl input;
609 	int ret = -ENOTTY;
610 
611 	if (copy_from_user(&input, argp, sizeof(input)))
612 		return -EFAULT;
613 
614 	input.fw_err = 0xff;
615 
616 	/* Message version must be non-zero */
617 	if (!input.msg_version)
618 		return -EINVAL;
619 
620 	mutex_lock(&snp_cmd_mutex);
621 
622 	/* Check if the VMPCK is not empty */
623 	if (is_vmpck_empty(snp_dev)) {
624 		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
625 		mutex_unlock(&snp_cmd_mutex);
626 		return -ENOTTY;
627 	}
628 
629 	switch (ioctl) {
630 	case SNP_GET_REPORT:
631 		ret = get_report(snp_dev, &input);
632 		break;
633 	case SNP_GET_DERIVED_KEY:
634 		ret = get_derived_key(snp_dev, &input);
635 		break;
636 	case SNP_GET_EXT_REPORT:
637 		ret = get_ext_report(snp_dev, &input);
638 		break;
639 	default:
640 		break;
641 	}
642 
643 	mutex_unlock(&snp_cmd_mutex);
644 
645 	if (input.fw_err && copy_to_user(argp, &input, sizeof(input)))
646 		return -EFAULT;
647 
648 	return ret;
649 }
650 
651 static void free_shared_pages(void *buf, size_t sz)
652 {
653 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
654 	int ret;
655 
656 	if (!buf)
657 		return;
658 
659 	ret = set_memory_encrypted((unsigned long)buf, npages);
660 	if (ret) {
661 		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
662 		return;
663 	}
664 
665 	__free_pages(virt_to_page(buf), get_order(sz));
666 }
667 
668 static void *alloc_shared_pages(struct device *dev, size_t sz)
669 {
670 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
671 	struct page *page;
672 	int ret;
673 
674 	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
675 	if (!page)
676 		return NULL;
677 
678 	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
679 	if (ret) {
680 		dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
681 		__free_pages(page, get_order(sz));
682 		return NULL;
683 	}
684 
685 	return page_address(page);
686 }
687 
/* File operations for the misc device node; all commands go through ioctl. */
static const struct file_operations snp_guest_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = snp_guest_ioctl,
};
692 
693 static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno)
694 {
695 	u8 *key = NULL;
696 
697 	switch (id) {
698 	case 0:
699 		*seqno = &layout->os_area.msg_seqno_0;
700 		key = layout->vmpck0;
701 		break;
702 	case 1:
703 		*seqno = &layout->os_area.msg_seqno_1;
704 		key = layout->vmpck1;
705 		break;
706 	case 2:
707 		*seqno = &layout->os_area.msg_seqno_2;
708 		key = layout->vmpck2;
709 		break;
710 	case 3:
711 		*seqno = &layout->os_area.msg_seqno_3;
712 		key = layout->vmpck3;
713 		break;
714 	default:
715 		break;
716 	}
717 
718 	return key;
719 }
720 
721 static int __init sev_guest_probe(struct platform_device *pdev)
722 {
723 	struct snp_secrets_page_layout *layout;
724 	struct sev_guest_platform_data *data;
725 	struct device *dev = &pdev->dev;
726 	struct snp_guest_dev *snp_dev;
727 	struct miscdevice *misc;
728 	void __iomem *mapping;
729 	int ret;
730 
731 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
732 		return -ENODEV;
733 
734 	if (!dev->platform_data)
735 		return -ENODEV;
736 
737 	data = (struct sev_guest_platform_data *)dev->platform_data;
738 	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
739 	if (!mapping)
740 		return -ENODEV;
741 
742 	layout = (__force void *)mapping;
743 
744 	ret = -ENOMEM;
745 	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
746 	if (!snp_dev)
747 		goto e_unmap;
748 
749 	ret = -EINVAL;
750 	snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno);
751 	if (!snp_dev->vmpck) {
752 		dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
753 		goto e_unmap;
754 	}
755 
756 	/* Verify that VMPCK is not zero. */
757 	if (is_vmpck_empty(snp_dev)) {
758 		dev_err(dev, "vmpck id %d is null\n", vmpck_id);
759 		goto e_unmap;
760 	}
761 
762 	platform_set_drvdata(pdev, snp_dev);
763 	snp_dev->dev = dev;
764 	snp_dev->layout = layout;
765 
766 	/* Allocate the shared page used for the request and response message. */
767 	snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
768 	if (!snp_dev->request)
769 		goto e_unmap;
770 
771 	snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
772 	if (!snp_dev->response)
773 		goto e_free_request;
774 
775 	snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
776 	if (!snp_dev->certs_data)
777 		goto e_free_response;
778 
779 	ret = -EIO;
780 	snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
781 	if (!snp_dev->crypto)
782 		goto e_free_cert_data;
783 
784 	misc = &snp_dev->misc;
785 	misc->minor = MISC_DYNAMIC_MINOR;
786 	misc->name = DEVICE_NAME;
787 	misc->fops = &snp_guest_fops;
788 
789 	/* initial the input address for guest request */
790 	snp_dev->input.req_gpa = __pa(snp_dev->request);
791 	snp_dev->input.resp_gpa = __pa(snp_dev->response);
792 	snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
793 
794 	ret =  misc_register(misc);
795 	if (ret)
796 		goto e_free_cert_data;
797 
798 	dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
799 	return 0;
800 
801 e_free_cert_data:
802 	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
803 e_free_response:
804 	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
805 e_free_request:
806 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
807 e_unmap:
808 	iounmap(mapping);
809 	return ret;
810 }
811 
812 static int __exit sev_guest_remove(struct platform_device *pdev)
813 {
814 	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
815 
816 	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
817 	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
818 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
819 	deinit_crypto(snp_dev->crypto);
820 	misc_deregister(&snp_dev->misc);
821 
822 	return 0;
823 }
824 
825 /*
826  * This driver is meant to be a common SEV guest interface driver and to
827  * support any SEV guest API. As such, even though it has been introduced
828  * with the SEV-SNP support, it is named "sev-guest".
829  */
static struct platform_driver sev_guest_driver = {
	.remove		= __exit_p(sev_guest_remove),	/* dropped when built-in */
	.driver		= {
		.name = "sev-guest",
	},
};
836 
/* The _probe variant lets sev_guest_probe() stay __init and be discarded. */
module_platform_driver_probe(sev_guest_driver, sev_guest_probe);

MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");
844