// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

/*
 * Since SME-related variables are set early in the boot process, they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);

/* Buffer used for early in-place encryption by the BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
                                       unsigned long size, bool enc)
{
        void *src, *dst;
        size_t len;

        if (!sme_me_mask)
                return;

        wbinvd();

        /*
         * There are a limited number of early mapping slots, so map (at
         * most) one page at a time.
         */
        while (size) {
                len = min_t(size_t, sizeof(sme_early_buffer), size);

                /*
                 * Create mappings for the current and desired format of
                 * the memory. Use a write-protected mapping for the source.
                 */
                src = enc ? early_memremap_decrypted_wp(paddr, len) :
                            early_memremap_encrypted_wp(paddr, len);

                dst = enc ? early_memremap_encrypted(paddr, len) :
                            early_memremap_decrypted(paddr, len);

                /*
                 * If a mapping can't be obtained to perform the operation,
                 * then eventual access of that area in the desired mode
                 * will cause a crash.
                 */
                BUG_ON(!src || !dst);

                /*
                 * Use a temporary buffer, of cache-line multiple size, to
                 * avoid data corruption as documented in the APM.
                 */
                memcpy(sme_early_buffer, src, len);
                memcpy(dst, sme_early_buffer, len);

                early_memunmap(dst, len);
                early_memunmap(src, len);

                paddr += len;
                size -= len;
        }
}

void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
        __sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
        __sme_early_enc_dec(paddr, size, false);
}

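/*
 * Map or unmap @size bytes at the direct-map address @vaddr using the early
 * page tables. Mappings are created with the encryption mask cleared so
 * that the (unencrypted) boot data can be read through them.
 */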
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
                                             bool map)
{
        unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
        pmdval_t pmd_flags, pmd;

        /* Use early_pmd_flags but remove the encryption mask */
        pmd_flags = __sme_clr(early_pmd_flags);

        do {
                pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
                __early_make_pgtable((unsigned long)vaddr, pmd);

                vaddr += PMD_SIZE;
                paddr += PMD_SIZE;
                size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
        } while (size);

        flush_tlb_local();
}

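/*
 * The boot data (the boot_params structure and the kernel command line) is
 * placed in memory unencrypted by the boot loader, so under SME it must be
 * accessed through decrypted mappings. sme_map_bootdata() and
 * sme_unmap_bootdata() create and tear down those early mappings.
 */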
void __init sme_unmap_bootdata(char *real_mode_data)
{
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;

        if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                return;

        /* Get the command line address before unmapping the real_mode_data */
        boot_data = (struct boot_params *)real_mode_data;
        cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

        __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

        if (!cmdline_paddr)
                return;

        __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;

        if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                return;

        __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

        /* Get the command line address after mapping the real_mode_data */
        boot_data = (struct boot_params *)real_mode_data;
        cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

        if (!cmdline_paddr)
                return;

        __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

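/*
 * Fold the SME encryption mask into the early page-table flags and the
 * kernel's protection map so that all later kernel mappings are created
 * encrypted by default.
 */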
void __init sme_early_init(void)
{
        unsigned int i;

        if (!sme_me_mask)
                return;

        early_pmd_flags = __sme_set(early_pmd_flags);

        __supported_pte_mask = __sme_set(__supported_pte_mask);

        /* Update the protection map with the memory encryption mask */
        for (i = 0; i < ARRAY_SIZE(protection_map); i++)
                protection_map[i] = pgprot_encrypted(protection_map[i]);

        if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                swiotlb_force = SWIOTLB_FORCE;
}

void __init sev_setup_arch(void)
{
        phys_addr_t total_mem = memblock_phys_mem_size();
        unsigned long size;

        if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;

        /*
         * For SEV, all DMA has to occur via shared/unencrypted pages.
         * SEV uses SWIOTLB to make this happen without changing device
         * drivers. However, depending on the workload being run, the
         * default 64MB of SWIOTLB may not be enough and SWIOTLB may
         * run out of buffers for DMA, resulting in I/O errors and/or
         * performance degradation especially with high I/O workloads.
         *
         * Adjust the default size of SWIOTLB for SEV guests using
         * a percentage of guest memory for SWIOTLB buffers.
         * Also, as the SWIOTLB bounce buffer memory is allocated
         * from low memory, ensure that the adjusted size is within
         * the limits of low available memory.
         *
         * The percentage of guest memory used here for SWIOTLB buffers
         * is more of an approximation of the static adjustment, which
         * is 64MB for <1G and ~128MB to 256MB for 1G-to-4G, i.e., 6%.
         */
        size = total_mem * 6 / 100;
        size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
        swiotlb_adjust_size(size);
}

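/*
 * Return the PFN mapped by @kpte at page-table @level and, when @ret_prot is
 * non-NULL, the corresponding page protections. Returns 0 (with a one-time
 * warning) for an unsupported level.
 */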
static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
        unsigned long pfn = 0;
        pgprot_t prot;

        switch (level) {
        case PG_LEVEL_4K:
                pfn = pte_pfn(*kpte);
                prot = pte_pgprot(*kpte);
                break;
        case PG_LEVEL_2M:
                pfn = pmd_pfn(*(pmd_t *)kpte);
                prot = pmd_pgprot(*(pmd_t *)kpte);
                break;
        case PG_LEVEL_1G:
                pfn = pud_pfn(*(pud_t *)kpte);
                prot = pud_pgprot(*(pud_t *)kpte);
                break;
        default:
                WARN_ONCE(1, "Invalid level for kpte\n");
                return 0;
        }

        if (ret_prot)
                *ret_prot = prot;

        return pfn;
}

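/*
 * Walk the kernel page tables covering [vaddr, vaddr + npages * PAGE_SIZE)
 * and, one mapped page (of whatever size) at a time, notify the hypervisor
 * via the notify_page_enc_status_changed() paravirt hook that the
 * encryption status of the range has changed.
 */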
void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
{
#ifdef CONFIG_PARAVIRT
        unsigned long sz = npages << PAGE_SHIFT;
        unsigned long vaddr_end = vaddr + sz;

        while (vaddr < vaddr_end) {
                int psize, pmask, level;
                unsigned long pfn;
                pte_t *kpte;

                kpte = lookup_address(vaddr, &level);
                if (!kpte || pte_none(*kpte)) {
                        WARN_ONCE(1, "kpte lookup failed for vaddr\n");
                        return;
                }

                pfn = pg_level_to_pfn(level, kpte, NULL);
                if (!pfn)
                        continue;

                psize = page_level_size(level);
                pmask = page_level_mask(level);

                notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);

                vaddr = (vaddr & pmask) + psize;
        }
#endif
}

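/*
 * Set or clear the encryption bit in the page-table entry @kpte and
 * encrypt or decrypt the page contents in place to match the new mapping.
 */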
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
        pgprot_t old_prot, new_prot;
        unsigned long pfn, pa, size;
        pte_t new_pte;

        pfn = pg_level_to_pfn(level, kpte, &old_prot);
        if (!pfn)
                return;

        new_prot = old_prot;
        if (enc)
                pgprot_val(new_prot) |= _PAGE_ENC;
        else
                pgprot_val(new_prot) &= ~_PAGE_ENC;

        /* If prot is the same then do nothing. */
        if (pgprot_val(old_prot) == pgprot_val(new_prot))
                return;

        pa = pfn << PAGE_SHIFT;
        size = page_level_size(level);

        /*
         * We are going to perform in-place en-/decryption and change the
         * physical page attribute from C=1 to C=0 or vice versa. Flush the
         * caches to ensure that data gets accessed with the correct C-bit.
         */
        clflush_cache_range(__va(pa), size);

        /* Encrypt/decrypt the contents in-place */
        if (enc)
                sme_early_encrypt(pa, size);
        else
                sme_early_decrypt(pa, size);

        /* Change the page encryption mask. */
        new_pte = pfn_pte(pfn, new_prot);
        set_pte_atomic(kpte, new_pte);
}

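/*
 * Early boot counterpart of set_memory_encrypted()/set_memory_decrypted():
 * change the encryption attribute of @size bytes at @vaddr, splitting large
 * pages when the range covers only part of them. Returns 0 on success and
 * 1 if a page-table lookup fails.
 */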
static int __init early_set_memory_enc_dec(unsigned long vaddr,
                                           unsigned long size, bool enc)
{
        unsigned long vaddr_end, vaddr_next, start;
        unsigned long psize, pmask;
        int split_page_size_mask;
        int level, ret;
        pte_t *kpte;

        start = vaddr;
        vaddr_next = vaddr;
        vaddr_end = vaddr + size;

        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
                kpte = lookup_address(vaddr, &level);
                if (!kpte || pte_none(*kpte)) {
                        ret = 1;
                        goto out;
                }

                if (level == PG_LEVEL_4K) {
                        __set_clr_pte_enc(kpte, level, enc);
                        vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
                        continue;
                }

                psize = page_level_size(level);
                pmask = page_level_mask(level);

                /*
                 * Check whether we can change the large page in one go.
                 * We request a split when the address is not aligned or
                 * the number of pages to set/clear the encryption bit on
                 * is smaller than the number of pages in the large page.
                 */
                if (vaddr == (vaddr & pmask) &&
                    ((vaddr_end - vaddr) >= psize)) {
                        __set_clr_pte_enc(kpte, level, enc);
                        vaddr_next = (vaddr & pmask) + psize;
                        continue;
                }

                /*
                 * The virtual address is part of a larger page, so create
                 * the next-level page table mapping (4K or 2M). If it is
                 * part of a 2M page then we request a split of the large
                 * page into 4K chunks; a 1GB large page is split into 2M
                 * pages.
                 */
                if (level == PG_LEVEL_2M)
                        split_page_size_mask = 0;
                else
                        split_page_size_mask = 1 << PG_LEVEL_2M;

                /*
                 * kernel_physical_mapping_change() does not flush the TLBs, so
                 * a TLB flush is required after we exit from the for loop.
                 */
                kernel_physical_mapping_change(__pa(vaddr & pmask),
                                               __pa((vaddr_end & pmask) + psize),
                                               split_page_size_mask);
        }

        ret = 0;

        notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
out:
        __flush_tlb_all();
        return ret;
}

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
        return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
        return early_set_memory_enc_dec(vaddr, size, true);
}

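/*
 * Notify the hypervisor that the encryption status of @npages pages at
 * @vaddr was changed during early boot; the page tables themselves are
 * not modified here.
 */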
void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
        notify_range_enc_status_changed(vaddr, npages, enc);
}

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
        /*
         * For SEV, all DMA must be to unencrypted addresses.
         */
        if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return true;

        /*
         * For SME, all DMA must be to unencrypted addresses if the
         * device does not support DMA to addresses that include the
         * encryption mask.
         */
        if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
                u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
                                                dev->bus_dma_limit);

                if (dma_dev_mask <= dma_enc_mask)
                        return true;
        }

        return false;
}

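/*
 * Free the unused portion of the .bss..decrypted section, re-encrypting
 * it first when memory encryption is active.
 */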
void __init mem_encrypt_free_decrypted_mem(void)
{
        unsigned long vaddr, vaddr_end, npages;
        int r;

        vaddr = (unsigned long)__start_bss_decrypted_unused;
        vaddr_end = (unsigned long)__end_bss_decrypted;
        npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

        /*
         * The unused memory range was mapped decrypted, change the encryption
         * attribute from decrypted to encrypted before freeing it.
         */
        if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
                r = set_memory_encrypted(vaddr, npages);
                if (r) {
                        pr_warn("failed to free unused decrypted pages\n");
                        return;
                }
        }

        free_init_pages("unused decrypted", vaddr, vaddr_end);
}

static void print_mem_encrypt_feature_info(void)
{
        pr_info("AMD Memory Encryption Features active:");

        /* Secure Memory Encryption */
        if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                /*
                 * SME is mutually exclusive with any of the SEV
                 * features below.
                 */
                pr_cont(" SME\n");
                return;
        }

        /* Secure Encrypted Virtualization */
        if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                pr_cont(" SEV");

        /* Encrypted Register State */
        if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
                pr_cont(" SEV-ES");

        pr_cont("\n");
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
        if (!sme_me_mask)
                return;

        /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
        swiotlb_update_mem_attributes();

        print_mem_encrypt_feature_info();
}

int arch_has_restricted_virtio_memory_access(void)
{
        return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}
EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);