// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at
	 * most) one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, sized to a cache-line multiple,
		 * to avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

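/*
 * Thin wrappers around __sme_early_enc_dec(): encrypt or decrypt the
 * contents of a physical memory range in place, so that the data can
 * subsequently be accessed through an encrypted (resp. decrypted)
 * mapping.
 */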
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		swiotlb_force = SWIOTLB_FORCE;
}

void __init sev_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV, all DMA has to occur via shared/unencrypted pages.
	 * SEV uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB for SEV guests using
	 * a percentage of guest memory for SWIOTLB buffers.
	 * Also, as the SWIOTLB bounce buffer memory is allocated
	 * from low memory, ensure that the adjusted size is within
	 * the limits of low available memory.
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * approximates the static adjustment of 64MB for guests with
	 * less than 1GB of memory and ~128MB to 256MB for 1GB-to-4GB
	 * guests, i.e. roughly 6% of guest memory (e.g. a 4GB guest
	 * gets 4096MB * 6 / 100 = ~246MB, clamped below to the
	 * [IO_TLB_DEFAULT_SIZE, 1GB] range).
	 */
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);
}

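/*
 * Set or clear the encryption attribute of a single kernel page table
 * entry at the given page table level. The mapped contents are
 * en-/decrypted in place first, so that they read back unchanged once
 * the C-bit of the mapping has been flipped.
 */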
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		old_prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		old_prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		return;
	}

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If the protection didn't change, there is nothing to do. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc)
		sme_early_encrypt(pa, size);
	else
		sme_early_decrypt(pa, size);

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}

static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned to
		 * the large page size or when the range does not cover
		 * the whole large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, create the next
		 * level page table mapping (4K or 2M). If it is part of a 2M
		 * page then we request a split of the large page into 4K
		 * chunks; a 1GB large page is split into 2M pages instead.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

out:
	__flush_tlb_all();
	return ret;
}

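/*
 * Change the encryption attribute of an existing kernel mapping during
 * early boot, before set_memory_encrypted()/set_memory_decrypted() are
 * usable. Return 0 on success, nonzero if no page table entry exists
 * for part of the range.
 *
 * A minimal, hypothetical usage sketch (decrypting a page-aligned
 * object "shared_page" that must be visible to the hypervisor; the
 * name is illustrative only):
 *
 *	if (early_set_memory_decrypted((unsigned long)&shared_page,
 *				       PAGE_SIZE))
 *		panic("could not map shared_page decrypted\n");
 */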
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}

static void print_mem_encrypt_feature_info(void)
{
	pr_info("AMD Memory Encryption Features active:");

	/* Secure Memory Encryption */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		/*
		 * SME is mutually exclusive with any of the SEV
		 * features below.
		 */
		pr_cont(" SME\n");
		return;
	}

	/* Secure Encrypted Virtualization */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pr_cont(" SEV");

	/* Encrypted Register State */
	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		pr_cont(" SEV-ES");

	pr_cont("\n");
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, we need to unroll the rep string I/O instructions,
	 * but SEV-ES supports them through the #VC handler.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    !cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		static_branch_enable(&sev_enable_key);

	print_mem_encrypt_feature_info();
}

int arch_has_restricted_virtio_memory_access(void)
{
	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}
EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);