#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	unsigned char *base;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem)
		panic("Cannot allocate trampoline\n");

	base = __va(mem);
	memblock_reserve(mem, size);
	real_mode_header = (struct real_mode_header *) base;
	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
	       base, (unsigned long long)mem, size);
}

void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	u32 *rel;
	u32 count;
	u32 *ptr;
	u16 *seg;
	int i;
	unsigned char *base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
#endif

	base = (unsigned char *)real_mode_header;

	memcpy(base, real_mode_blob, size);

	/* Real-mode segment corresponding to the trampoline's physical base. */
	real_mode_seg = __pa(base) >> 4;
	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = rel[0];
	rel = &rel[1];
	for (i = 0; i < count; i++) {
		seg = (u16 *) (base + rel[i]);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations; their count word follows the 16-bit table. */
	count = rel[i];
	rel = &rel[i + 1];
	for (i = 0; i < count; i++) {
		ptr = (u32 *) (base + rel[i]);
		*ptr += __pa(base);
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = read_cr4();

	/* Low-memory and kernel mappings for the trampoline page table. */
	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
	trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
	trampoline_pgd[511] = init_level4_pgt[511].pgd;
#endif
}
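/*
 * For reference, the relocation stream walked above has the following
 * layout. This is a sketch inferred from the two loops in
 * setup_real_mode(); the field names are illustrative only, and the
 * stream itself is generated at build time from the real-mode blob:
 *
 *	u32 seg_count;			number of 16-bit segment relocations
 *	u32 seg_offs[seg_count];	blob offsets of u16 segment values
 *	u32 lin_count;			number of 32-bit linear relocations
 *	u32 lin_offs[lin_count];	blob offsets of u32 linear addresses
 */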
/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also, the trampoline code will be executed by the APs, so
 * it needs to be marked executable by the time do_pre_smp_initcalls()
 * runs at the latest; thus, run this as an early_initcall().
 */
static int __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

	/* Default everything to non-executable, then carve out RO and text. */
	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);

	return 0;
}
early_initcall(set_real_mode_permissions);
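/*
 * Rough boot-time ordering of the pieces above (illustrative; the
 * exact call sites live in the x86 setup code):
 *
 *	setup_arch()
 *	    reserve_real_mode()		  grab low memory via memblock
 *	    ...kernel page tables set up...
 *	    setup_real_mode()		  copy blob, apply relocations
 *	do_pre_smp_initcalls()
 *	    set_real_mode_permissions()	  via early_initcall()
 *	smp_init()
 *	    ...APs start through the trampoline...
 */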