xref: /openbmc/linux/arch/x86/mm/mem_encrypt.c (revision f21e49be)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

/*
 * Since SME-related variables are set early in the boot process, they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

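/*
 * Thin wrappers used to encrypt or decrypt a range of physical memory in
 * place during early boot.
 */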
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

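/*
 * Map or unmap a virtual address range at PMD granularity using the early
 * page table helpers, with the encryption mask cleared so that the range is
 * accessed decrypted.
 */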
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

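/*
 * Remove the decrypted early mappings of the boot_params structure and the
 * kernel command line once they are no longer needed.
 */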
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

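/*
 * With SME active, the boot_params structure and the kernel command line were
 * placed in memory unencrypted by the firmware/bootloader, so map them
 * decrypted in order to read them correctly.
 */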
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

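/*
 * Propagate the encryption mask into the early PMD flags, the supported PTE
 * mask and the protection map so that all later kernel mappings are created
 * encrypted. SEV guests additionally force the use of SWIOTLB for DMA.
 */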
void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		swiotlb_force = SWIOTLB_FORCE;
}

void __init sev_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV, all DMA has to occur via shared/unencrypted pages.
	 * SEV uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB for SEV guests using
	 * a percentage of guest memory for SWIOTLB buffers.
	 * Also, as the SWIOTLB bounce buffer memory is allocated
	 * from low memory, ensure that the adjusted size is within
	 * the limits of low available memory.
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * is an approximation of the static adjustment, which would be
	 * 64MB for <1G and ~128MB to 256MB for 1G-to-4G, i.e., roughly
	 * 6% of guest memory (e.g., a 4GB guest gets about a 246MB
	 * bounce buffer, subject to the clamping below).
	 */
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);
}

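/*
 * Return the PFN that the given page table entry maps at the given page table
 * level, optionally returning its protection bits through @ret_prot. Returns
 * 0 (and warns once) for an unsupported level.
 */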
static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
	unsigned long pfn = 0;
	pgprot_t prot;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		WARN_ONCE(1, "Invalid level for kpte\n");
		return 0;
	}

	if (ret_prot)
		*ret_prot = prot;

	return pfn;
}

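/*
 * Walk the virtual address range and invoke the paravirt
 * notify_page_enc_status_changed() hook for each mapping found, so that the
 * hypervisor can track the encryption status of the underlying pages.
 */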
void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
{
#ifdef CONFIG_PARAVIRT
	unsigned long sz = npages << PAGE_SHIFT;
	unsigned long vaddr_end = vaddr + sz;

	while (vaddr < vaddr_end) {
		int psize, pmask, level;
		unsigned long pfn;
		pte_t *kpte;

		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			WARN_ONCE(1, "kpte lookup for vaddr\n");
			return;
		}

		pfn = pg_level_to_pfn(level, kpte, NULL);
		if (!pfn)
			continue;

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);

		vaddr = (vaddr & pmask) + psize;
	}
#endif
}

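/*
 * Set or clear the encryption bit in the given page table entry and
 * encrypt or decrypt the contents of the mapped page(s) in place so that
 * the data remains valid under the new C-bit setting.
 */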
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	pfn = pg_level_to_pfn(level, kpte, &old_prot);
	if (!pfn)
		return;

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If prot is same then do nothing. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc)
		sme_early_encrypt(pa, size);
	else
		sme_early_decrypt(pa, size);

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}

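/*
 * Walk the kernel page tables for the given range, update the encryption bit
 * of each mapping (requesting a split of large pages when only part of them
 * is affected), flush the TLB, and report the new encryption status through
 * notify_range_enc_status_changed().
 */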
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next, start;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	start = vaddr;
	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned or the
		 * number of pages on which to set/clear the encryption bit is
		 * smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, so create the
		 * next level of page table mapping (4K or 2M). If it is part
		 * of a 2M page, request a split of the large page into 4K
		 * chunks; a 1GB large page is split into 2M pages instead.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

	notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
out:
	__flush_tlb_all();
	return ret;
}

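/*
 * Change the encryption attribute of a range of kernel virtual addresses
 * during early boot, before the regular set_memory_encrypted() and
 * set_memory_decrypted() interfaces can be used.
 */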
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
	notify_range_enc_status_changed(vaddr, npages, enc);
}

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

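/*
 * Re-encrypt and free the unused portion of the .bss..decrypted section.
 */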
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}

static void print_mem_encrypt_feature_info(void)
{
	pr_info("AMD Memory Encryption Features active:");

	/* Secure Memory Encryption */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		/*
		 * SME is mutually exclusive with any of the SEV
		 * features below.
		 */
		pr_cont(" SME\n");
		return;
	}

	/* Secure Encrypted Virtualization */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pr_cont(" SEV");

	/* Encrypted Register State */
	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		pr_cont(" SEV-ES");

	pr_cont("\n");
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, we need to unroll the rep string I/O instructions,
	 * but SEV-ES supports them through the #VC handler.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    !cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		static_branch_enable(&sev_enable_key);

	print_mem_encrypt_feature_info();
}

int arch_has_restricted_virtio_memory_access(void)
{
	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}
EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);