/* Paravirtualization interfaces
   Copyright (C) 2006 Rusty Russell IBM Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

   2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/arch_hooks.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>

/* nop stub */
void _paravirt_nop(void)
{
}

static void __init default_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
}

char *memory_setup(void)
{
        return pv_init_ops.memory_setup();
}

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[], end_##ops##_##name[]; \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

unsigned paravirt_patch_nop(void)
{
        return 0;
}

unsigned paravirt_patch_ignore(unsigned len)
{
        return len;
}

/* Layout of a rel32 call/jmp: one opcode byte plus a 4-byte displacement. */
struct branch {
        unsigned char opcode;
        u32 delta;
} __attribute__((packed));

unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (tgt_clobbers & ~site_clobbers)
                return len;     /* target would clobber too much for this site */
        if (len < 5)
                return len;     /* call too long for patch site */

        b->opcode = 0xe8; /* call */
        b->delta = delta;
        BUILD_BUG_ON(sizeof(*b) != 5);

        return 5;
}

unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (len < 5)
                return len;     /* jmp too long for patch site */

        b->opcode = 0xe9;       /* jmp */
        b->delta = delta;

        return 5;
}
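
/*
 * For illustration: what the two patchers above actually emit.  A patch
 * site that was compiled as an indirect call through a pv_ops slot is
 * overwritten in place with a direct rel32 call/jmp to the native
 * function.  The x86 E8/E9 encodings take a displacement relative to
 * the *next* instruction, which is why delta is computed against
 * addr + 5.  With made-up example addresses:
 *
 *      addr   = 0xc0100010     (start of the 5-byte patch site)
 *      target = 0xc0100f00     (e.g. the native op for this slot)
 *      delta  = 0xc0100f00 - (0xc0100010 + 5) = 0xeeb
 *
 * so the bytes written are e8 eb 0e 00 00, i.e. "call target".
 */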

/* Neat trick to map patch type back to the call within the
 * corresponding structure. */
static void *get_call_destination(u8 type)
{
        struct paravirt_patch_template tmpl = {
                .pv_init_ops = pv_init_ops,
                .pv_time_ops = pv_time_ops,
                .pv_cpu_ops = pv_cpu_ops,
                .pv_irq_ops = pv_irq_ops,
                .pv_apic_ops = pv_apic_ops,
                .pv_mmu_ops = pv_mmu_ops,
        };
        return *((void **)&tmpl + type);
}

unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len)
{
        void *opfunc = get_call_destination(type);
        unsigned ret;

        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
        else if (opfunc == paravirt_nop)
                /* If the operation is a nop, then nop the callsite */
                ret = paravirt_patch_nop();
        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret))
                /* If operation requires a jmp, then jmp */
                ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
        else
                /* Otherwise call the function; assume target could
                   clobber any caller-save reg */
                ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
                                          addr, clobbers, len);

        return ret;
}

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end)
{
        unsigned insn_len = end - start;

        if (insn_len > len || start == NULL)
                insn_len = len;
        else
                memcpy(insnbuf, start, insn_len);

        return insn_len;
}

void init_IRQ(void)
{
        pv_irq_ops.init_IRQ();
}

static void native_flush_tlb(void)
{
        __native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
        __native_flush_tlb_global();
}

static void native_flush_tlb_single(unsigned long addr)
{
        __native_flush_tlb_single(addr);
}

/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_syscall_ret(void);

static int __init print_banner(void)
{
        pv_init_ops.banner();
        return 0;
}
core_initcall(print_banner);

static struct resource reserve_ioports = {
        .start = 0,
        .end = IO_SPACE_LIMIT,
        .name = "paravirt-ioport",
        .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
        return request_resource(&ioport_resource, &reserve_ioports);
}

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(__get_cpu_var(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
        BUG_ON(preemptible());

        __get_cpu_var(paravirt_lazy_mode) = mode;
}

void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
        BUG_ON(preemptible());

        __get_cpu_var(paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
}

void paravirt_enter_lazy_mmu(void)
{
        enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
        paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_enter_lazy_cpu(void)
{
        enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_leave_lazy_cpu(void)
{
        paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
        return __get_cpu_var(paravirt_lazy_mode);
}
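
/*
 * For illustration: how the lazy modes above are meant to be used.
 * Generic code brackets a batch of page table updates with enter/leave;
 * a hypervisor back end can then queue the individual set_pte_at()
 * calls and flush them in a single hypercall on leave.  On bare
 * hardware both hooks are paravirt_nop.  Roughly:
 *
 *      arch_enter_lazy_mmu_mode();        // pv_mmu_ops.lazy_mode.enter
 *      for (addr = start; addr < end; addr += PAGE_SIZE)
 *              set_pte_at(mm, addr, ptep, entry);   // may be queued
 *      arch_leave_lazy_mmu_mode();        // pv_mmu_ops.lazy_mode.leave
 *
 * The BUG_ON(preemptible()) checks in enter_lazy() and
 * paravirt_leave_lazy() enforce that a batch never migrates between
 * CPUs, since the pending state lives in a per-CPU variable.
 */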

struct pv_info pv_info = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
        .kernel_rpl = 0,
        .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
};

struct pv_init_ops pv_init_ops = {
        .patch = native_patch,
        .banner = default_banner,
        .arch_setup = paravirt_nop,
        .memory_setup = machine_specific_memory_setup,
};

struct pv_time_ops pv_time_ops = {
        .time_init = hpet_time_init,
        .get_wallclock = native_get_wallclock,
        .set_wallclock = native_set_wallclock,
        .sched_clock = native_sched_clock,
        .get_cpu_khz = native_calculate_cpu_khz,
};

struct pv_irq_ops pv_irq_ops = {
        .init_IRQ = native_init_IRQ,
        .save_fl = native_save_fl,
        .restore_fl = native_restore_fl,
        .irq_disable = native_irq_disable,
        .irq_enable = native_irq_enable,
        .safe_halt = native_safe_halt,
        .halt = native_halt,
};

struct pv_cpu_ops pv_cpu_ops = {
        .cpuid = native_cpuid,
        .get_debugreg = native_get_debugreg,
        .set_debugreg = native_set_debugreg,
        .clts = native_clts,
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
        .read_cr8 = native_read_cr8,
        .write_cr8 = native_write_cr8,
#endif
        .wbinvd = native_wbinvd,
        .read_msr = native_read_msr_safe,
        .write_msr = native_write_msr_safe,
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
        .read_tscp = native_read_tscp,
        .load_tr_desc = native_load_tr_desc,
        .set_ldt = native_set_ldt,
        .load_gdt = native_load_gdt,
        .load_idt = native_load_idt,
        .store_gdt = native_store_gdt,
        .store_idt = native_store_idt,
        .store_tr = native_store_tr,
        .load_tls = native_load_tls,
        .write_ldt_entry = native_write_ldt_entry,
        .write_gdt_entry = native_write_gdt_entry,
        .write_idt_entry = native_write_idt_entry,
        .load_sp0 = native_load_sp0,

        .irq_enable_syscall_ret = native_irq_enable_syscall_ret,
        .iret = native_iret,
        .swapgs = native_swapgs,

        .set_iopl_mask = native_set_iopl_mask,
        .io_delay = native_io_delay,

        .lazy_mode = {
                .enter = paravirt_nop,
                .leave = paravirt_nop,
        },
};
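
/*
 * For illustration: a paravirt guest overrides these native defaults
 * very early in boot, before the patching machinery rewrites the call
 * sites.  The "examplehv" names below are hypothetical, invented for
 * this sketch; lguest, Xen and VMI all follow the same pattern:
 *
 *      static unsigned long examplehv_save_fl(void)
 *      {
 *              // Hypothetical: read the virtual IF from a shared page.
 *              return examplehv_shared->irq_enabled ? X86_EFLAGS_IF : 0;
 *      }
 *
 *      void __init examplehv_init(void)
 *      {
 *              pv_info.name = "examplehv";
 *              pv_info.paravirt_enabled = 1;
 *              pv_irq_ops.save_fl = examplehv_save_fl;
 *              pv_init_ops.patch = examplehv_patch;    // hypothetical
 *      }
 *
 * Any slot left untouched keeps its native_* implementation.
 */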

struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
        .apic_write = native_apic_write,
        .apic_write_atomic = native_apic_write_atomic,
        .apic_read = native_apic_read,
        .setup_boot_clock = setup_boot_APIC_clock,
        .setup_secondary_clock = setup_secondary_APIC_clock,
        .startup_ipi_hook = paravirt_nop,
#endif
};

struct pv_mmu_ops pv_mmu_ops = {
#ifndef CONFIG_X86_64
        .pagetable_setup_start = native_pagetable_setup_start,
        .pagetable_setup_done = native_pagetable_setup_done,
#endif

        .read_cr2 = native_read_cr2,
        .write_cr2 = native_write_cr2,
        .read_cr3 = native_read_cr3,
        .write_cr3 = native_write_cr3,

        .flush_tlb_user = native_flush_tlb,
        .flush_tlb_kernel = native_flush_tlb_global,
        .flush_tlb_single = native_flush_tlb_single,
        .flush_tlb_others = native_flush_tlb_others,

        .alloc_pt = paravirt_nop,
        .alloc_pd = paravirt_nop,
        .alloc_pd_clone = paravirt_nop,
        .release_pt = paravirt_nop,
        .release_pd = paravirt_nop,

        .set_pte = native_set_pte,
        .set_pte_at = native_set_pte_at,
        .set_pmd = native_set_pmd,
        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,

#ifdef CONFIG_HIGHPTE
        .kmap_atomic_pte = kmap_atomic,
#endif

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        .set_pte_atomic = native_set_pte_atomic,
        .set_pte_present = native_set_pte_present,
        .pte_clear = native_pte_clear,
        .pmd_clear = native_pmd_clear,
#endif
        .set_pud = native_set_pud,
        .pmd_val = native_pmd_val,
        .make_pmd = native_make_pmd,

#if PAGETABLE_LEVELS == 4
        .pud_val = native_pud_val,
        .make_pud = native_make_pud,
        .set_pgd = native_set_pgd,
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

        .pte_val = native_pte_val,
        .pgd_val = native_pgd_val,

        .make_pte = native_make_pte,
        .make_pgd = native_make_pgd,

        .dup_mmap = paravirt_nop,
        .exit_mmap = paravirt_nop,
        .activate_mm = paravirt_nop,

        .lazy_mode = {
                .enter = paravirt_nop,
                .leave = paravirt_nop,
        },
};

EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL    (pv_cpu_ops);
EXPORT_SYMBOL    (pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_apic_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL    (pv_irq_ops);
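
/*
 * Note on the mixed export types above: pv_cpu_ops, pv_mmu_ops and
 * pv_irq_ops are plain EXPORT_SYMBOL, presumably because inline
 * helpers such as local_irq_save() and set_pte() expand to indirect
 * calls through these structures inside any module, GPL or not, e.g.:
 *
 *      unsigned long flags;
 *      local_irq_save(flags);          // calls pv_irq_ops.save_fl and
 *      ...                             //   pv_irq_ops.irq_disable
 *      local_irq_restore(flags);       // calls pv_irq_ops.restore_fl
 *
 * The remaining structures are reached only from core kernel and GPL
 * code, so they stay EXPORT_SYMBOL_GPL.
 */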