// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

/*
 * Since SME-related variables are set early in the boot process, they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);

bool sev_enabled __section(".data");

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There is a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

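/*
 * Early mapping helper: map or unmap a virtual address range at PMD
 * granularity in the early page tables, with the encryption mask cleared,
 * so that the boot data it covers is accessed as unencrypted memory.
 */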
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

	if (sev_active())
		swiotlb_force = SWIOTLB_FORCE;
}

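/*
 * Switch the encryption attribute of a single 4K, 2M or 1G mapping in
 * place: the backing memory is encrypted or decrypted first, then the
 * page table entry is rewritten with the new C-bit setting.
 */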
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		old_prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		old_prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		return;
	}

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If prot is same then do nothing. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << page_level_shift(level);
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc)
		sme_early_encrypt(pa, size);
	else
		sme_early_decrypt(pa, size);

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}

static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned or the
		 * number of pages to set/clear the encryption bit on is
		 * smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, create the next
		 * level page table mapping (4K or 2M). If it is part of a 2M
		 * page then we request a split of the large page into 4K
		 * chunks; a 1GB large page is split into 2M pages.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

out:
	__flush_tlb_all();
	return ret;
}

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * sme_active() and sev_active() functions are used for this. When a
 * distinction isn't needed, the mem_encrypt_active() function can be used.
 *
 * The trampoline code is a good example for this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
bool sme_active(void)
{
	return sme_me_mask && !sev_enabled;
}

bool sev_active(void)
{
	return sev_status & MSR_AMD64_SEV_ENABLED;
}

/* Needs to be called from non-instrumentable code */
bool noinstr sev_es_active(void)
{
	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
}

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (sev_active())
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (sme_active()) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

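		/*
		 * Illustration (hypothetical values): with the C-bit at bit
		 * position 47, dma_enc_mask is DMA_BIT_MASK(47), the largest
		 * address that does not overlap the encryption bit. A device
		 * limited to 32-bit DMA (dma_dev_mask = DMA_BIT_MASK(32))
		 * can never reach an encrypted address, so unencrypted DMA
		 * is forced for it.
		 */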
		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (mem_encrypt_active()) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}

static void print_mem_encrypt_feature_info(void)
{
	pr_info("AMD Memory Encryption Features active:");

	/* Secure Memory Encryption */
	if (sme_active()) {
		/*
		 * SME is mutually exclusive with any of the SEV
		 * features below.
		 */
		pr_cont(" SME\n");
		return;
	}

	/* Secure Encrypted Virtualization */
	if (sev_active())
		pr_cont(" SEV");

	/* Encrypted Register State */
	if (sev_es_active())
		pr_cont(" SEV-ES");

	pr_cont("\n");
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, we need to unroll the rep string I/O instructions.
	 */
	if (sev_active())
		static_branch_enable(&sev_enable_key);

	print_mem_encrypt_feature_info();
}