// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
#include <asm/sev.h>

/* Header of the copied real-mode blob; set once the sub-1M area is reserved */
struct real_mode_header *real_mode_header;
/* Points at the CR4 field of the trampoline header once it is set up */
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	size_t size = real_mode_size_needed();

	if (!size)
		return;

	WARN_ON(slab_is_available());

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
	if (!mem)
		pr_info("No sub-1M memory is available for the trampoline\n");
	else
		set_real_mode_mem(mem);

	/*
	 * Unconditionally reserve the entire first 1M, see comment in
	 * setup_arch().
	 */
	memblock_reserve(0, SZ_1M);
}

static void sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		th->flags |= TH_FLAGS_SME_ACTIVE;

	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
		/*
		 * Skip the call to verify_cpu() in secondary_startup_64 as it
		 * will cause #VC exceptions when the AP can't handle them yet.
		 */
		th->start = (u64) secondary_startup_64_no_verify;

		if (sev_es_setup_ap_jump_table(real_mode_header))
			panic("Failed to get/update SEV-ES AP Jump Table");
	}
#endif
}

static void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
	int i;
#endif

	base = (unsigned char *)real_mode_header;

	/*
	 * If SME is active, the trampoline area will need to be in
	 * decrypted memory in order to bring up other processors
	 * successfully. This is not needed for SEV.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

	memcpy(base, real_mode_blob, size);

	phys_base = __pa(base);
	real_mode_seg = phys_base >> 4;

	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = mmu_cr4_features;

	trampoline_header->flags = 0;

	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

	/* Map the real mode stub as virtual == physical */
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;

	/*
	 * Include the entirety of the kernel mapping into the trampoline
	 * PGD. This way, all mappings present in the normal kernel page
	 * tables are usable while running on trampoline_pgd.
	 */
	for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
		trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif

	sme_sev_setup_real_mode(trampoline_header);
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also trampoline code will be executed by APs so we
 * need to mark it executable at do_pre_smp_initcalls() at least,
 * thus run it as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

static int __init init_real_mode(void)
{
	if (!real_mode_header)
		panic("Real mode trampoline was not allocated");

	setup_real_mode();
	set_real_mode_permissions();

	return 0;
}
early_initcall(init_real_mode);