/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo
 * bootloader, based on an EFI patch by Edgar Hucek. Based on the E820 map,
 * the page table is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>

static pgd_t *save_pgd __initdata;
static unsigned long efi_flags __initdata;

/*
 * We allocate runtime services regions bottom-up, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000, and limit the EFI VA mapping space to 64G.
 */
static u64 efi_va = -4 * (1UL << 30);
#define EFI_VA_END	(-68 * (1UL << 30))

/*
 * Scratch space used for switching the pagetable in the EFI stub. The
 * variable itself is defined in efi_stub_64.S, which saves and restores
 * these fields around runtime service calls.
 */
struct efi_scratch {
	u64 r15;
	u64 prev_cr3;
	pgd_t *efi_pgt;
	bool use_pgd;
};

extern struct efi_scratch efi_scratch;

static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;
	void *p;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}

/*
 * Build a 1:1 mapping of low memory so that firmware code can be called
 * in physical mode: each low PGD entry is saved in save_pgd and then
 * aliased to the corresponding direct-mapping entry. The saved entries
 * are restored in efi_call_phys_epilog().
 */
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;
	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP))
		return;

	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);

	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
	__flush_tlb_all();
}

void __init efi_call_phys_epilog(void)
{
	/*
	 * Restore the low PGD entries saved by efi_call_phys_prelog(),
	 * flush the TLB and re-enable interrupts.
	 */
	int pgd;
	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	if (!efi_enabled(EFI_OLD_MEMMAP))
		return;

	for (pgd = 0; pgd < n_pgds; pgd++)
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
	kfree(save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
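/*
 * Illustrative sketch (not part of the original file): this is how the
 * generic EFI code of this era brackets a physical-mode service call with
 * the prelog/epilog pair above, modeled on phys_efi_set_virtual_address_map()
 * in efi.c. efi_phys and efi_call_phys4() live outside this file, so the
 * example is compiled out.
 */
#if 0
static efi_status_t __init example_phys_efi_call(
	unsigned long memory_map_size, unsigned long descriptor_size,
	u32 descriptor_version, efi_memory_desc_t *virtual_map)
{
	efi_status_t status;

	efi_call_phys_prelog();		/* 1:1 map low memory, irqs off */
	status = efi_call_phys4(efi_phys.set_virtual_address_map,
				memory_map_size, descriptor_size,
				descriptor_version, virtual_map);
	efi_call_phys_epilog();		/* restore kernel mappings, irqs on */

	return status;
}
#endif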
/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_pgds;
	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);

	memcpy(pgd + pgd_index(PAGE_OFFSET),
	       init_mm.pgd + pgd_index(PAGE_OFFSET),
	       sizeof(pgd_t) * num_pgds);
}

void efi_setup_page_tables(void)
{
	efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;

	if (!efi_enabled(EFI_OLD_MEMMAP))
		efi_scratch.use_pgd = true;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
	unsigned long pf = 0;

	/* Map non-write-back regions as uncached */
	if (!(md->attribute & EFI_MEMORY_WB))
		pf |= _PAGE_PCD;

	if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		/*
		 * Adding the offset moved us above the previous allocation;
		 * step down one 2M page so allocations keep growing down.
		 */
		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

/*
 * The kexec kernel uses efi_map_region_fixed() to map the EFI runtime
 * memory ranges; md->virt_addr holds the virtual address that was already
 * established by the first kernel, so the same mapping is reused.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->virt_addr);
}

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		/* Map the remainder that init_memory_mapping() left uncovered */
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}

/* Record where the EFI setup_data payload lives for later use. */
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}
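/*
 * Worked example for the efi_map_region() VA allocator above (illustrative
 * numbers, not from the original file). efi_va starts at -4G =
 * 0xffffffff00000000 and grows down:
 *
 *   Region A: phys 0x7f000000 (2M-aligned), 16 pages (0x10000 bytes).
 *     efi_va -= 0x10000  ->  0xfffffffeffff0000
 *     PA is 2M-aligned, so round efi_va down to a 2M boundary:
 *     efi_va &= PMD_MASK ->  0xfffffffeffe00000  (= md->virt_addr)
 *
 *   Region B: phys 0x7e123000 (offset 0x123000 in its 2M page), 4 pages.
 *     efi_va -= 0x4000   ->  0xfffffffeffdfc000  (prev_va)
 *     keep the same 2M offset as the PA:
 *     efi_va = (prev_va & PMD_MASK) + 0x123000 = 0xfffffffeffd23000
 *     0xfffffffeffd23000 < prev_va, so no extra 2M step-down is needed.
 *
 * Preserving the physical 2M offset lets the mapping code use large pages
 * without splitting them.
 */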