// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>
#include <asm/sev.h>

#include "mm_internal.h"

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * SNP-specific routine which needs to additionally change the page state from
 * private to shared before copying the data from the source to destination and
 * restore after the copy.
 */
static inline void __init snp_memcpy(void *dst, void *src, size_t sz,
				     unsigned long paddr, bool decrypt)
{
	unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (decrypt) {
		/*
		 * @paddr needs to be accessed decrypted, mark the page shared in
		 * the RMP table before copying it.
		 */
		early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);

		memcpy(dst, src, sz);

		/* Restore the page state after the memcpy. */
		early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
	} else {
		/*
		 * @paddr needs to be accessed encrypted, no need for the page state
		 * change.
		 */
		memcpy(dst, src, sz);
	}
}

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);
		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
			snp_memcpy(sme_early_buffer, src, len, paddr, enc);
			snp_memcpy(dst, sme_early_buffer, len, paddr, !enc);
		} else {
			memcpy(sme_early_buffer, src, len);
			memcpy(dst, sme_early_buffer, len);
		}

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

void __init sev_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV, all DMA has to occur via shared/unencrypted pages.
	 * SEV uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB for SEV guests using
	 * a percentage of guest memory for SWIOTLB buffers.
	 * Also, as the SWIOTLB bounce buffer memory is allocated
	 * from low memory, ensure that the adjusted size is within
	 * the limits of low available memory.
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * is more of an approximation of the static adjustment, which
	 * would be 64MB for <1G and ~128MB to 256MB for 1G-to-4G, i.e.,
	 * roughly the 6% used below.
	 */
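	/*
	 * Illustrative arithmetic (added example, not part of the original
	 * comment): a 4 GB guest gets 6% of 4096 MB, i.e. roughly 245 MB of
	 * bounce buffers; a 512 MB guest is raised to the 64 MB
	 * (IO_TLB_DEFAULT_SIZE) floor; guests above roughly 17 GB are capped
	 * at the 1 GB (SZ_1G) ceiling.
	 */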
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);
}

static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
	unsigned long pfn = 0;
	pgprot_t prot;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		WARN_ONCE(1, "Invalid level for kpte\n");
		return 0;
	}

	if (ret_prot)
		*ret_prot = prot;

	return pfn;
}

static bool amd_enc_tlb_flush_required(bool enc)
{
	return true;
}

static bool amd_enc_cache_flush_required(void)
{
	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
}

static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
#ifdef CONFIG_PARAVIRT
	unsigned long sz = npages << PAGE_SHIFT;
	unsigned long vaddr_end = vaddr + sz;

	while (vaddr < vaddr_end) {
		int psize, pmask, level;
		unsigned long pfn;
		pte_t *kpte;

		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			WARN_ONCE(1, "kpte lookup for vaddr\n");
			return;
		}

		pfn = pg_level_to_pfn(level, kpte, NULL);
		if (!pfn)
			continue;

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);

		vaddr = (vaddr & pmask) + psize;
	}
#endif
}

static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * To maintain the security guarantees of SEV-SNP guests, make sure
	 * to invalidate the memory before the encryption attribute is cleared.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
		snp_set_memory_shared(vaddr, npages);
}

/* Return true unconditionally: return value doesn't matter for the SEV side */
static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * After memory is mapped encrypted in the page table, validate it
	 * so that it is consistent with the page table updates.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
		snp_set_memory_private(vaddr, npages);

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		enc_dec_hypercall(vaddr, npages, enc);

	return true;
}
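/*
 * Note on ordering (a summary of the comments above and of the early path in
 * __set_clr_pte_enc() below): when a page is made shared (decrypted), its RMP
 * state is changed before the page table is updated; when it is made private
 * (encrypted), the page table is updated first and the page is then validated
 * as private in the RMP.
 */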
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	pfn = pg_level_to_pfn(level, kpte, &old_prot);
	if (!pfn)
		return;

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If prot is the same then do nothing. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc) {
		sme_early_encrypt(pa, size);
	} else {
		sme_early_decrypt(pa, size);

		/*
		 * On SNP, the page state change in the RMP table must happen
		 * before the page table updates.
		 */
		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
	}

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);

	/*
	 * If the page is set encrypted in the page table, then update the RMP
	 * table to add this page as private.
	 */
	if (enc)
		early_snp_set_memory_private((unsigned long)__va(pa), pa, 1);
}

static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next, start;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	start = vaddr;
	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned and
		 * the number of pages to set/clear the encryption bit is
		 * smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, create the next
		 * level page table mapping (4K or 2M). If it is part of a 2M
		 * page then we request a split of the large page into 4K
		 * chunks; a 1GB large page is split into 2M pages.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
out:
	__flush_tlb_all();
	return ret;
}

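/*
 * Minimal usage sketch (hypothetical caller, not taken from this file): early
 * boot code that must share a buffer with the hypervisor would do roughly
 *
 *	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 *		early_set_memory_decrypted((unsigned long)buf, PAGE_SIZE);
 *
 * and may flip the mapping back later with early_set_memory_encrypted().
 */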
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
	enc_dec_hypercall(vaddr, npages, enc);
}

void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		swiotlb_force = SWIOTLB_FORCE;

	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
	x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
	x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
}

void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}