/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/arch_hooks.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>

/* nop stub */
void _paravirt_nop(void)
{
}

static void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}

char *memory_setup(void)
{
	return pv_init_ops.memory_setup();
}

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

unsigned paravirt_patch_nop(void)
{
	return 0;
}

unsigned paravirt_patch_ignore(unsigned len)
{
	return len;
}

struct branch {
	unsigned char opcode;
	u32 delta;
} __attribute__((packed));

unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len)
{
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr+5);

	if (tgt_clobbers & ~site_clobbers)
		return len;	/* target would clobber too much for this site */
	if (len < 5)
		return len;	/* call too long for patch site */

	b->opcode = 0xe8;	/* call */
	b->delta = delta;
	BUILD_BUG_ON(sizeof(*b) != 5);

	return 5;
}

unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len)
{
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr+5);

	if (len < 5)
		return len;	/* jmp too long for patch site */

	b->opcode = 0xe9;	/* jmp */
	b->delta = delta;

	return 5;
}
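/*
 * Worked example (illustration added for clarity, not functional code):
 * patching a call site at addr 0x1000 to a target at 0x2000 computes
 * delta = 0x2000 - (0x1000 + 5) = 0xffb, so the five bytes written are
 *
 *	e8 fb 0f 00 00		call rel32 (opcode 0xe9 for jmp)
 *
 * i.e. exactly the direct near call/jump the compiler would have
 * emitted, replacing the indirect call through the pv_ops structure.
 */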
/* Neat trick to map patch type back to the call within the
 * corresponding structure. */
static void *get_call_destination(u8 type)
{
	struct paravirt_patch_template tmpl = {
		.pv_init_ops = pv_init_ops,
		.pv_time_ops = pv_time_ops,
		.pv_cpu_ops = pv_cpu_ops,
		.pv_irq_ops = pv_irq_ops,
		.pv_apic_ops = pv_apic_ops,
		.pv_mmu_ops = pv_mmu_ops,
	};
	return *((void **)&tmpl + type);
}

unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len)
{
	void *opfunc = get_call_destination(type);
	unsigned ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with a ud2a (BUG) */
		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
	else if (opfunc == paravirt_nop)
		/* If the operation is a nop, then nop the callsite */
		ret = paravirt_patch_nop();
	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret))
		/* If operation requires a jmp, then jmp */
		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
	else
		/* Otherwise call the function; assume target could
		   clobber any caller-save reg */
		ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
					  addr, clobbers, len);

	return ret;
}

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end)
{
	unsigned insn_len = end - start;

	if (insn_len > len || start == NULL)
		insn_len = len;
	else
		memcpy(insnbuf, start, insn_len);

	return insn_len;
}
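/*
 * Usage sketch (assumed, mirroring the per-arch native_patch()
 * implementations in arch/x86/kernel/paravirt_patch_*.c):
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *
 * expands to
 *
 *	extern const char start_pv_irq_ops_irq_disable[],
 *			  end_pv_irq_ops_irq_disable[];
 *	asm("start_pv_irq_ops_irq_disable: cli; "
 *	    "end_pv_irq_ops_irq_disable:");
 *
 * and the patcher then copies those bytes over the call site with
 *
 *	paravirt_patch_insns(insnbuf, len,
 *			     start_pv_irq_ops_irq_disable,
 *			     end_pv_irq_ops_irq_disable);
 */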
void init_IRQ(void)
{
	pv_irq_ops.init_IRQ();
}

static void native_flush_tlb(void)
{
	__native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
	__native_flush_tlb_global();
}

static void native_flush_tlb_single(unsigned long addr)
{
	__native_flush_tlb_single(addr);
}

/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_syscall_ret(void);

static int __init print_banner(void)
{
	pv_init_ops.banner();
	return 0;
}
core_initcall(print_banner);

static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static struct resource reserve_iomem = {
	.start = 0,
	.end = -1,
	.name = "paravirt-iomem",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
	int ret;

	ret = request_resource(&ioport_resource, &reserve_ioports);
	if (ret == 0) {
		ret = request_resource(&iomem_resource, &reserve_iomem);
		if (ret)
			release_resource(&reserve_ioports);
	}

	return ret;
}

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
	BUG_ON(preemptible());

	__get_cpu_var(paravirt_lazy_mode) = mode;
}

void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
	BUG_ON(preemptible());

	__get_cpu_var(paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
}

void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_enter_lazy_cpu(void)
{
	enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_leave_lazy_cpu(void)
{
	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	return __get_cpu_var(paravirt_lazy_mode);
}
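/*
 * Usage sketch (assumed, based on <asm/paravirt.h>): generic code does
 * not call these helpers directly; it enters lazy MMU mode through
 * arch_enter_lazy_mmu_mode(), which dispatches to
 * pv_mmu_ops.lazy_mode.enter.  A hypervisor backend can queue
 * set_pte()/set_pmd() updates while the mode is active and flush the
 * whole batch in a single hypercall when arch_leave_lazy_mmu_mode()
 * invokes .leave.  The native ops below leave both hooks as
 * paravirt_nop, so bare hardware pays nothing for the indirection
 * (after patching, not even a call).
 */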
struct pv_info pv_info = {
	.name = "bare hardware",
	.paravirt_enabled = 0,
	.kernel_rpl = 0,
	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
};

struct pv_init_ops pv_init_ops = {
	.patch = native_patch,
	.banner = default_banner,
	.arch_setup = paravirt_nop,
	.memory_setup = machine_specific_memory_setup,
};

struct pv_time_ops pv_time_ops = {
	.time_init = hpet_time_init,
	.get_wallclock = native_get_wallclock,
	.set_wallclock = native_set_wallclock,
	.sched_clock = native_sched_clock,
	.get_cpu_khz = native_calculate_cpu_khz,
};

struct pv_irq_ops pv_irq_ops = {
	.init_IRQ = native_init_IRQ,
	.save_fl = native_save_fl,
	.restore_fl = native_restore_fl,
	.irq_disable = native_irq_disable,
	.irq_enable = native_irq_enable,
	.safe_halt = native_safe_halt,
	.halt = native_halt,
};

struct pv_cpu_ops pv_cpu_ops = {
	.cpuid = native_cpuid,
	.get_debugreg = native_get_debugreg,
	.set_debugreg = native_set_debugreg,
	.clts = native_clts,
	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,
	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
	.read_cr8 = native_read_cr8,
	.write_cr8 = native_write_cr8,
#endif
	.wbinvd = native_wbinvd,
	.read_msr = native_read_msr_safe,
	.write_msr = native_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,
	.read_tscp = native_read_tscp,
	.load_tr_desc = native_load_tr_desc,
	.set_ldt = native_set_ldt,
	.load_gdt = native_load_gdt,
	.load_idt = native_load_idt,
	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = native_store_tr,
	.load_tls = native_load_tls,
	.write_ldt_entry = native_write_ldt_entry,
	.write_gdt_entry = native_write_gdt_entry,
	.write_idt_entry = native_write_idt_entry,
	.load_sp0 = native_load_sp0,

	.irq_enable_syscall_ret = native_irq_enable_syscall_ret,
	.iret = native_iret,
	.swapgs = native_swapgs,

	.set_iopl_mask = native_set_iopl_mask,
	.io_delay = native_io_delay,

	.lazy_mode = {
		.enter = paravirt_nop,
		.leave = paravirt_nop,
	},
};

struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
	.apic_write = native_apic_write,
	.apic_write_atomic = native_apic_write_atomic,
	.apic_read = native_apic_read,
	.setup_boot_clock = setup_boot_APIC_clock,
	.setup_secondary_clock = setup_secondary_APIC_clock,
	.startup_ipi_hook = paravirt_nop,
#endif
};

struct pv_mmu_ops pv_mmu_ops = {
#ifndef CONFIG_X86_64
	.pagetable_setup_start = native_pagetable_setup_start,
	.pagetable_setup_done = native_pagetable_setup_done,
#endif

	.read_cr2 = native_read_cr2,
	.write_cr2 = native_write_cr2,
	.read_cr3 = native_read_cr3,
	.write_cr3 = native_write_cr3,

	.flush_tlb_user = native_flush_tlb,
	.flush_tlb_kernel = native_flush_tlb_global,
	.flush_tlb_single = native_flush_tlb_single,
	.flush_tlb_others = native_flush_tlb_others,

	.alloc_pt = paravirt_nop,
	.alloc_pd = paravirt_nop,
	.alloc_pd_clone = paravirt_nop,
	.release_pt = paravirt_nop,
	.release_pd = paravirt_nop,

	.set_pte = native_set_pte,
	.set_pte_at = native_set_pte_at,
	.set_pmd = native_set_pmd,
	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

#ifdef CONFIG_HIGHPTE
	.kmap_atomic_pte = kmap_atomic,
#endif

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	.set_pte_atomic = native_set_pte_atomic,
	.set_pte_present = native_set_pte_present,
	.pte_clear = native_pte_clear,
	.pmd_clear = native_pmd_clear,
#endif
	.set_pud = native_set_pud,
	.pmd_val = native_pmd_val,
	.make_pmd = native_make_pmd,

#if PAGETABLE_LEVELS == 4
	.pud_val = native_pud_val,
	.make_pud = native_make_pud,
	.set_pgd = native_set_pgd,
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

	.pte_val = native_pte_val,
	.pgd_val = native_pgd_val,

	.make_pte = native_make_pte,
	.make_pgd = native_make_pgd,

	.dup_mmap = paravirt_nop,
	.exit_mmap = paravirt_nop,
	.activate_mm = paravirt_nop,

	.lazy_mode = {
		.enter = paravirt_nop,
		.leave = paravirt_nop,
	},
};

EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL    (pv_cpu_ops);
EXPORT_SYMBOL    (pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_apic_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL    (pv_irq_ops);
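/*
 * Illustrative sketch (not part of this file): a hypervisor port such
 * as Xen or lguest overrides selected entries at early boot, e.g.
 *
 *	pv_info.name = "ExampleHV";			<- hypothetical values
 *	pv_info.paravirt_enabled = 1;
 *	pv_irq_ops.irq_disable = examplehv_irq_disable;	<- hypothetical helper
 *
 * Entries left untouched keep the native defaults installed above, and
 * apply_paravirt() later rewrites hot call sites via pv_init_ops.patch.
 */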