1 /* 2 * Core of Xen paravirt_ops implementation. 3 * 4 * This file contains the xen_paravirt_ops structure itself, and the 5 * implementations for: 6 * - privileged instructions 7 * - interrupt flags 8 * - segment operations 9 * - booting and setup 10 * 11 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 12 */ 13 14 #include <linux/kernel.h> 15 #include <linux/init.h> 16 #include <linux/smp.h> 17 #include <linux/preempt.h> 18 #include <linux/hardirq.h> 19 #include <linux/percpu.h> 20 #include <linux/delay.h> 21 #include <linux/start_kernel.h> 22 #include <linux/sched.h> 23 #include <linux/bootmem.h> 24 #include <linux/module.h> 25 #include <linux/mm.h> 26 #include <linux/page-flags.h> 27 #include <linux/highmem.h> 28 #include <linux/console.h> 29 30 #include <xen/interface/xen.h> 31 #include <xen/interface/version.h> 32 #include <xen/interface/physdev.h> 33 #include <xen/interface/vcpu.h> 34 #include <xen/features.h> 35 #include <xen/page.h> 36 #include <xen/hvc-console.h> 37 38 #include <asm/paravirt.h> 39 #include <asm/apic.h> 40 #include <asm/page.h> 41 #include <asm/xen/hypercall.h> 42 #include <asm/xen/hypervisor.h> 43 #include <asm/fixmap.h> 44 #include <asm/processor.h> 45 #include <asm/proto.h> 46 #include <asm/msr-index.h> 47 #include <asm/setup.h> 48 #include <asm/desc.h> 49 #include <asm/pgtable.h> 50 #include <asm/tlbflush.h> 51 #include <asm/reboot.h> 52 53 #include "xen-ops.h" 54 #include "mmu.h" 55 #include "multicalls.h" 56 57 EXPORT_SYMBOL_GPL(hypercall_page); 58 59 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 60 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); 61 62 enum xen_domain_type xen_domain_type = XEN_NATIVE; 63 EXPORT_SYMBOL_GPL(xen_domain_type); 64 65 struct start_info *xen_start_info; 66 EXPORT_SYMBOL_GPL(xen_start_info); 67 68 struct shared_info xen_dummy_shared_info; 69 70 void *xen_initial_gdt; 71 72 /* 73 * Point at some empty memory to start with. We map the real shared_info 74 * page as soon as fixmap is up and running. 75 */ 76 struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info; 77 78 /* 79 * Flag to determine whether vcpu info placement is available on all 80 * VCPUs. We assume it is to start with, and then set it to zero on 81 * the first failure. This is because it can succeed on some VCPUs 82 * and not others, since it can involve hypervisor memory allocation, 83 * or because the guest failed to guarantee all the appropriate 84 * constraints on all VCPUs (ie buffer can't cross a page boundary). 85 * 86 * Note that any particular CPU may be using a placed vcpu structure, 87 * but we can only optimise if the all are. 88 * 89 * 0: not available, 1: available 90 */ 91 static int have_vcpu_info_placement = 1; 92 93 static void xen_vcpu_setup(int cpu) 94 { 95 struct vcpu_register_vcpu_info info; 96 int err; 97 struct vcpu_info *vcpup; 98 99 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 100 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 101 102 if (!have_vcpu_info_placement) 103 return; /* already tested, not available */ 104 105 vcpup = &per_cpu(xen_vcpu_info, cpu); 106 107 info.mfn = arbitrary_virt_to_mfn(vcpup); 108 info.offset = offset_in_page(vcpup); 109 110 printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n", 111 cpu, vcpup, info.mfn, info.offset); 112 113 /* Check to see if the hypervisor will put the vcpu_info 114 structure where we want it, which allows direct access via 115 a percpu-variable. */ 116 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); 117 118 if (err) { 119 printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err); 120 have_vcpu_info_placement = 0; 121 } else { 122 /* This cpu is using the registered vcpu info, even if 123 later ones fail to. */ 124 per_cpu(xen_vcpu, cpu) = vcpup; 125 126 printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n", 127 cpu, vcpup); 128 } 129 } 130 131 /* 132 * On restore, set the vcpu placement up again. 133 * If it fails, then we're in a bad state, since 134 * we can't back out from using it... 135 */ 136 void xen_vcpu_restore(void) 137 { 138 if (have_vcpu_info_placement) { 139 int cpu; 140 141 for_each_online_cpu(cpu) { 142 bool other_cpu = (cpu != smp_processor_id()); 143 144 if (other_cpu && 145 HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) 146 BUG(); 147 148 xen_vcpu_setup(cpu); 149 150 if (other_cpu && 151 HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) 152 BUG(); 153 } 154 155 BUG_ON(!have_vcpu_info_placement); 156 } 157 } 158 159 static void __init xen_banner(void) 160 { 161 unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL); 162 struct xen_extraversion extra; 163 HYPERVISOR_xen_version(XENVER_extraversion, &extra); 164 165 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 166 pv_info.name); 167 printk(KERN_INFO "Xen version: %d.%d%s%s\n", 168 version >> 16, version & 0xffff, extra.extraversion, 169 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 170 } 171 172 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; 173 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; 174 175 static void xen_cpuid(unsigned int *ax, unsigned int *bx, 176 unsigned int *cx, unsigned int *dx) 177 { 178 unsigned maskecx = ~0; 179 unsigned maskedx = ~0; 180 181 /* 182 * Mask out inconvenient features, to try and disable as many 183 * unsupported kernel subsystems as possible. 184 */ 185 if (*ax == 1) { 186 maskecx = cpuid_leaf1_ecx_mask; 187 maskedx = cpuid_leaf1_edx_mask; 188 } 189 190 asm(XEN_EMULATE_PREFIX "cpuid" 191 : "=a" (*ax), 192 "=b" (*bx), 193 "=c" (*cx), 194 "=d" (*dx) 195 : "0" (*ax), "2" (*cx)); 196 197 *cx &= maskecx; 198 *dx &= maskedx; 199 } 200 201 static __init void xen_init_cpuid_mask(void) 202 { 203 unsigned int ax, bx, cx, dx; 204 205 cpuid_leaf1_edx_mask = 206 ~((1 << X86_FEATURE_MCE) | /* disable MCE */ 207 (1 << X86_FEATURE_MCA) | /* disable MCA */ 208 (1 << X86_FEATURE_ACC)); /* thermal monitoring */ 209 210 if (!xen_initial_domain()) 211 cpuid_leaf1_edx_mask &= 212 ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ 213 (1 << X86_FEATURE_ACPI)); /* disable ACPI */ 214 215 ax = 1; 216 xen_cpuid(&ax, &bx, &cx, &dx); 217 218 /* cpuid claims we support xsave; try enabling it to see what happens */ 219 if (cx & (1 << (X86_FEATURE_XSAVE % 32))) { 220 unsigned long cr4; 221 222 set_in_cr4(X86_CR4_OSXSAVE); 223 224 cr4 = read_cr4(); 225 226 if ((cr4 & X86_CR4_OSXSAVE) == 0) 227 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32)); 228 229 clear_in_cr4(X86_CR4_OSXSAVE); 230 } 231 } 232 233 static void xen_set_debugreg(int reg, unsigned long val) 234 { 235 HYPERVISOR_set_debugreg(reg, val); 236 } 237 238 static unsigned long xen_get_debugreg(int reg) 239 { 240 return HYPERVISOR_get_debugreg(reg); 241 } 242 243 void xen_leave_lazy(void) 244 { 245 paravirt_leave_lazy(paravirt_get_lazy_mode()); 246 xen_mc_flush(); 247 } 248 249 static unsigned long xen_store_tr(void) 250 { 251 return 0; 252 } 253 254 /* 255 * Set the page permissions for a particular virtual address. If the 256 * address is a vmalloc mapping (or other non-linear mapping), then 257 * find the linear mapping of the page and also set its protections to 258 * match. 259 */ 260 static void set_aliased_prot(void *v, pgprot_t prot) 261 { 262 int level; 263 pte_t *ptep; 264 pte_t pte; 265 unsigned long pfn; 266 struct page *page; 267 268 ptep = lookup_address((unsigned long)v, &level); 269 BUG_ON(ptep == NULL); 270 271 pfn = pte_pfn(*ptep); 272 page = pfn_to_page(pfn); 273 274 pte = pfn_pte(pfn, prot); 275 276 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) 277 BUG(); 278 279 if (!PageHighMem(page)) { 280 void *av = __va(PFN_PHYS(pfn)); 281 282 if (av != v) 283 if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0)) 284 BUG(); 285 } else 286 kmap_flush_unused(); 287 } 288 289 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) 290 { 291 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 292 int i; 293 294 for(i = 0; i < entries; i += entries_per_page) 295 set_aliased_prot(ldt + i, PAGE_KERNEL_RO); 296 } 297 298 static void xen_free_ldt(struct desc_struct *ldt, unsigned entries) 299 { 300 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 301 int i; 302 303 for(i = 0; i < entries; i += entries_per_page) 304 set_aliased_prot(ldt + i, PAGE_KERNEL); 305 } 306 307 static void xen_set_ldt(const void *addr, unsigned entries) 308 { 309 struct mmuext_op *op; 310 struct multicall_space mcs = xen_mc_entry(sizeof(*op)); 311 312 op = mcs.args; 313 op->cmd = MMUEXT_SET_LDT; 314 op->arg1.linear_addr = (unsigned long)addr; 315 op->arg2.nr_ents = entries; 316 317 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 318 319 xen_mc_issue(PARAVIRT_LAZY_CPU); 320 } 321 322 static void xen_load_gdt(const struct desc_ptr *dtr) 323 { 324 unsigned long va = dtr->address; 325 unsigned int size = dtr->size + 1; 326 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 327 unsigned long frames[pages]; 328 int f; 329 330 /* A GDT can be up to 64k in size, which corresponds to 8192 331 8-byte entries, or 16 4k pages.. */ 332 333 BUG_ON(size > 65536); 334 BUG_ON(va & ~PAGE_MASK); 335 336 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 337 int level; 338 pte_t *ptep = lookup_address(va, &level); 339 unsigned long pfn, mfn; 340 void *virt; 341 342 BUG_ON(ptep == NULL); 343 344 pfn = pte_pfn(*ptep); 345 mfn = pfn_to_mfn(pfn); 346 virt = __va(PFN_PHYS(pfn)); 347 348 frames[f] = mfn; 349 350 make_lowmem_page_readonly((void *)va); 351 make_lowmem_page_readonly(virt); 352 } 353 354 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) 355 BUG(); 356 } 357 358 static void load_TLS_descriptor(struct thread_struct *t, 359 unsigned int cpu, unsigned int i) 360 { 361 struct desc_struct *gdt = get_cpu_gdt_table(cpu); 362 xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); 363 struct multicall_space mc = __xen_mc_entry(0); 364 365 MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]); 366 } 367 368 static void xen_load_tls(struct thread_struct *t, unsigned int cpu) 369 { 370 /* 371 * XXX sleazy hack: If we're being called in a lazy-cpu zone 372 * and lazy gs handling is enabled, it means we're in a 373 * context switch, and %gs has just been saved. This means we 374 * can zero it out to prevent faults on exit from the 375 * hypervisor if the next process has no %gs. Either way, it 376 * has been saved, and the new value will get loaded properly. 377 * This will go away as soon as Xen has been modified to not 378 * save/restore %gs for normal hypercalls. 379 * 380 * On x86_64, this hack is not used for %gs, because gs points 381 * to KERNEL_GS_BASE (and uses it for PDA references), so we 382 * must not zero %gs on x86_64 383 * 384 * For x86_64, we need to zero %fs, otherwise we may get an 385 * exception between the new %fs descriptor being loaded and 386 * %fs being effectively cleared at __switch_to(). 387 */ 388 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) { 389 #ifdef CONFIG_X86_32 390 lazy_load_gs(0); 391 #else 392 loadsegment(fs, 0); 393 #endif 394 } 395 396 xen_mc_batch(); 397 398 load_TLS_descriptor(t, cpu, 0); 399 load_TLS_descriptor(t, cpu, 1); 400 load_TLS_descriptor(t, cpu, 2); 401 402 xen_mc_issue(PARAVIRT_LAZY_CPU); 403 } 404 405 #ifdef CONFIG_X86_64 406 static void xen_load_gs_index(unsigned int idx) 407 { 408 if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx)) 409 BUG(); 410 } 411 #endif 412 413 static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, 414 const void *ptr) 415 { 416 xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]); 417 u64 entry = *(u64 *)ptr; 418 419 preempt_disable(); 420 421 xen_mc_flush(); 422 if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry)) 423 BUG(); 424 425 preempt_enable(); 426 } 427 428 static int cvt_gate_to_trap(int vector, const gate_desc *val, 429 struct trap_info *info) 430 { 431 if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT) 432 return 0; 433 434 info->vector = vector; 435 info->address = gate_offset(*val); 436 info->cs = gate_segment(*val); 437 info->flags = val->dpl; 438 /* interrupt gates clear IF */ 439 if (val->type == GATE_INTERRUPT) 440 info->flags |= 1 << 2; 441 442 return 1; 443 } 444 445 /* Locations of each CPU's IDT */ 446 static DEFINE_PER_CPU(struct desc_ptr, idt_desc); 447 448 /* Set an IDT entry. If the entry is part of the current IDT, then 449 also update Xen. */ 450 static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) 451 { 452 unsigned long p = (unsigned long)&dt[entrynum]; 453 unsigned long start, end; 454 455 preempt_disable(); 456 457 start = __get_cpu_var(idt_desc).address; 458 end = start + __get_cpu_var(idt_desc).size + 1; 459 460 xen_mc_flush(); 461 462 native_write_idt_entry(dt, entrynum, g); 463 464 if (p >= start && (p + 8) <= end) { 465 struct trap_info info[2]; 466 467 info[1].address = 0; 468 469 if (cvt_gate_to_trap(entrynum, g, &info[0])) 470 if (HYPERVISOR_set_trap_table(info)) 471 BUG(); 472 } 473 474 preempt_enable(); 475 } 476 477 static void xen_convert_trap_info(const struct desc_ptr *desc, 478 struct trap_info *traps) 479 { 480 unsigned in, out, count; 481 482 count = (desc->size+1) / sizeof(gate_desc); 483 BUG_ON(count > 256); 484 485 for (in = out = 0; in < count; in++) { 486 gate_desc *entry = (gate_desc*)(desc->address) + in; 487 488 if (cvt_gate_to_trap(in, entry, &traps[out])) 489 out++; 490 } 491 traps[out].address = 0; 492 } 493 494 void xen_copy_trap_info(struct trap_info *traps) 495 { 496 const struct desc_ptr *desc = &__get_cpu_var(idt_desc); 497 498 xen_convert_trap_info(desc, traps); 499 } 500 501 /* Load a new IDT into Xen. In principle this can be per-CPU, so we 502 hold a spinlock to protect the static traps[] array (static because 503 it avoids allocation, and saves stack space). */ 504 static void xen_load_idt(const struct desc_ptr *desc) 505 { 506 static DEFINE_SPINLOCK(lock); 507 static struct trap_info traps[257]; 508 509 spin_lock(&lock); 510 511 __get_cpu_var(idt_desc) = *desc; 512 513 xen_convert_trap_info(desc, traps); 514 515 xen_mc_flush(); 516 if (HYPERVISOR_set_trap_table(traps)) 517 BUG(); 518 519 spin_unlock(&lock); 520 } 521 522 /* Write a GDT descriptor entry. Ignore LDT descriptors, since 523 they're handled differently. */ 524 static void xen_write_gdt_entry(struct desc_struct *dt, int entry, 525 const void *desc, int type) 526 { 527 preempt_disable(); 528 529 switch (type) { 530 case DESC_LDT: 531 case DESC_TSS: 532 /* ignore */ 533 break; 534 535 default: { 536 xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]); 537 538 xen_mc_flush(); 539 if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc)) 540 BUG(); 541 } 542 543 } 544 545 preempt_enable(); 546 } 547 548 static void xen_load_sp0(struct tss_struct *tss, 549 struct thread_struct *thread) 550 { 551 struct multicall_space mcs = xen_mc_entry(0); 552 MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0); 553 xen_mc_issue(PARAVIRT_LAZY_CPU); 554 } 555 556 static void xen_set_iopl_mask(unsigned mask) 557 { 558 struct physdev_set_iopl set_iopl; 559 560 /* Force the change at ring 0. */ 561 set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3; 562 HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); 563 } 564 565 static void xen_io_delay(void) 566 { 567 } 568 569 #ifdef CONFIG_X86_LOCAL_APIC 570 static u32 xen_apic_read(u32 reg) 571 { 572 return 0; 573 } 574 575 static void xen_apic_write(u32 reg, u32 val) 576 { 577 /* Warn to see if there's any stray references */ 578 WARN_ON(1); 579 } 580 581 static u64 xen_apic_icr_read(void) 582 { 583 return 0; 584 } 585 586 static void xen_apic_icr_write(u32 low, u32 id) 587 { 588 /* Warn to see if there's any stray references */ 589 WARN_ON(1); 590 } 591 592 static void xen_apic_wait_icr_idle(void) 593 { 594 return; 595 } 596 597 static u32 xen_safe_apic_wait_icr_idle(void) 598 { 599 return 0; 600 } 601 602 static void set_xen_basic_apic_ops(void) 603 { 604 apic->read = xen_apic_read; 605 apic->write = xen_apic_write; 606 apic->icr_read = xen_apic_icr_read; 607 apic->icr_write = xen_apic_icr_write; 608 apic->wait_icr_idle = xen_apic_wait_icr_idle; 609 apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; 610 } 611 612 #endif 613 614 615 static void xen_clts(void) 616 { 617 struct multicall_space mcs; 618 619 mcs = xen_mc_entry(0); 620 621 MULTI_fpu_taskswitch(mcs.mc, 0); 622 623 xen_mc_issue(PARAVIRT_LAZY_CPU); 624 } 625 626 static void xen_write_cr0(unsigned long cr0) 627 { 628 struct multicall_space mcs; 629 630 /* Only pay attention to cr0.TS; everything else is 631 ignored. */ 632 mcs = xen_mc_entry(0); 633 634 MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0); 635 636 xen_mc_issue(PARAVIRT_LAZY_CPU); 637 } 638 639 static void xen_write_cr4(unsigned long cr4) 640 { 641 cr4 &= ~X86_CR4_PGE; 642 cr4 &= ~X86_CR4_PSE; 643 644 native_write_cr4(cr4); 645 } 646 647 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) 648 { 649 int ret; 650 651 ret = 0; 652 653 switch (msr) { 654 #ifdef CONFIG_X86_64 655 unsigned which; 656 u64 base; 657 658 case MSR_FS_BASE: which = SEGBASE_FS; goto set; 659 case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set; 660 case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set; 661 662 set: 663 base = ((u64)high << 32) | low; 664 if (HYPERVISOR_set_segment_base(which, base) != 0) 665 ret = -EFAULT; 666 break; 667 #endif 668 669 case MSR_STAR: 670 case MSR_CSTAR: 671 case MSR_LSTAR: 672 case MSR_SYSCALL_MASK: 673 case MSR_IA32_SYSENTER_CS: 674 case MSR_IA32_SYSENTER_ESP: 675 case MSR_IA32_SYSENTER_EIP: 676 /* Fast syscall setup is all done in hypercalls, so 677 these are all ignored. Stub them out here to stop 678 Xen console noise. */ 679 break; 680 681 default: 682 ret = native_write_msr_safe(msr, low, high); 683 } 684 685 return ret; 686 } 687 688 void xen_setup_shared_info(void) 689 { 690 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 691 set_fixmap(FIX_PARAVIRT_BOOTMAP, 692 xen_start_info->shared_info); 693 694 HYPERVISOR_shared_info = 695 (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); 696 } else 697 HYPERVISOR_shared_info = 698 (struct shared_info *)__va(xen_start_info->shared_info); 699 700 #ifndef CONFIG_SMP 701 /* In UP this is as good a place as any to set up shared info */ 702 xen_setup_vcpu_info_placement(); 703 #endif 704 705 xen_setup_mfn_list_list(); 706 } 707 708 /* This is called once we have the cpu_possible_map */ 709 void xen_setup_vcpu_info_placement(void) 710 { 711 int cpu; 712 713 for_each_possible_cpu(cpu) 714 xen_vcpu_setup(cpu); 715 716 /* xen_vcpu_setup managed to place the vcpu_info within the 717 percpu area for all cpus, so make use of it */ 718 if (have_vcpu_info_placement) { 719 printk(KERN_INFO "Xen: using vcpu_info placement\n"); 720 721 pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct); 722 pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct); 723 pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct); 724 pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct); 725 pv_mmu_ops.read_cr2 = xen_read_cr2_direct; 726 } 727 } 728 729 static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf, 730 unsigned long addr, unsigned len) 731 { 732 char *start, *end, *reloc; 733 unsigned ret; 734 735 start = end = reloc = NULL; 736 737 #define SITE(op, x) \ 738 case PARAVIRT_PATCH(op.x): \ 739 if (have_vcpu_info_placement) { \ 740 start = (char *)xen_##x##_direct; \ 741 end = xen_##x##_direct_end; \ 742 reloc = xen_##x##_direct_reloc; \ 743 } \ 744 goto patch_site 745 746 switch (type) { 747 SITE(pv_irq_ops, irq_enable); 748 SITE(pv_irq_ops, irq_disable); 749 SITE(pv_irq_ops, save_fl); 750 SITE(pv_irq_ops, restore_fl); 751 #undef SITE 752 753 patch_site: 754 if (start == NULL || (end-start) > len) 755 goto default_patch; 756 757 ret = paravirt_patch_insns(insnbuf, len, start, end); 758 759 /* Note: because reloc is assigned from something that 760 appears to be an array, gcc assumes it's non-null, 761 but doesn't know its relationship with start and 762 end. */ 763 if (reloc > start && reloc < end) { 764 int reloc_off = reloc - start; 765 long *relocp = (long *)(insnbuf + reloc_off); 766 long delta = start - (char *)addr; 767 768 *relocp += delta; 769 } 770 break; 771 772 default_patch: 773 default: 774 ret = paravirt_patch_default(type, clobbers, insnbuf, 775 addr, len); 776 break; 777 } 778 779 return ret; 780 } 781 782 static const struct pv_info xen_info __initdata = { 783 .paravirt_enabled = 1, 784 .shared_kernel_pmd = 0, 785 786 .name = "Xen", 787 }; 788 789 static const struct pv_init_ops xen_init_ops __initdata = { 790 .patch = xen_patch, 791 792 .banner = xen_banner, 793 .memory_setup = xen_memory_setup, 794 .arch_setup = xen_arch_setup, 795 .post_allocator_init = xen_post_allocator_init, 796 }; 797 798 static const struct pv_time_ops xen_time_ops __initdata = { 799 .time_init = xen_time_init, 800 801 .set_wallclock = xen_set_wallclock, 802 .get_wallclock = xen_get_wallclock, 803 .get_tsc_khz = xen_tsc_khz, 804 .sched_clock = xen_sched_clock, 805 }; 806 807 static const struct pv_cpu_ops xen_cpu_ops __initdata = { 808 .cpuid = xen_cpuid, 809 810 .set_debugreg = xen_set_debugreg, 811 .get_debugreg = xen_get_debugreg, 812 813 .clts = xen_clts, 814 815 .read_cr0 = native_read_cr0, 816 .write_cr0 = xen_write_cr0, 817 818 .read_cr4 = native_read_cr4, 819 .read_cr4_safe = native_read_cr4_safe, 820 .write_cr4 = xen_write_cr4, 821 822 .wbinvd = native_wbinvd, 823 824 .read_msr = native_read_msr_safe, 825 .write_msr = xen_write_msr_safe, 826 .read_tsc = native_read_tsc, 827 .read_pmc = native_read_pmc, 828 829 .iret = xen_iret, 830 .irq_enable_sysexit = xen_sysexit, 831 #ifdef CONFIG_X86_64 832 .usergs_sysret32 = xen_sysret32, 833 .usergs_sysret64 = xen_sysret64, 834 #endif 835 836 .load_tr_desc = paravirt_nop, 837 .set_ldt = xen_set_ldt, 838 .load_gdt = xen_load_gdt, 839 .load_idt = xen_load_idt, 840 .load_tls = xen_load_tls, 841 #ifdef CONFIG_X86_64 842 .load_gs_index = xen_load_gs_index, 843 #endif 844 845 .alloc_ldt = xen_alloc_ldt, 846 .free_ldt = xen_free_ldt, 847 848 .store_gdt = native_store_gdt, 849 .store_idt = native_store_idt, 850 .store_tr = xen_store_tr, 851 852 .write_ldt_entry = xen_write_ldt_entry, 853 .write_gdt_entry = xen_write_gdt_entry, 854 .write_idt_entry = xen_write_idt_entry, 855 .load_sp0 = xen_load_sp0, 856 857 .set_iopl_mask = xen_set_iopl_mask, 858 .io_delay = xen_io_delay, 859 860 /* Xen takes care of %gs when switching to usermode for us */ 861 .swapgs = paravirt_nop, 862 863 .lazy_mode = { 864 .enter = paravirt_enter_lazy_cpu, 865 .leave = xen_leave_lazy, 866 }, 867 }; 868 869 static const struct pv_apic_ops xen_apic_ops __initdata = { 870 #ifdef CONFIG_X86_LOCAL_APIC 871 .setup_boot_clock = paravirt_nop, 872 .setup_secondary_clock = paravirt_nop, 873 .startup_ipi_hook = paravirt_nop, 874 #endif 875 }; 876 877 static void xen_reboot(int reason) 878 { 879 struct sched_shutdown r = { .reason = reason }; 880 881 #ifdef CONFIG_SMP 882 smp_send_stop(); 883 #endif 884 885 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) 886 BUG(); 887 } 888 889 static void xen_restart(char *msg) 890 { 891 xen_reboot(SHUTDOWN_reboot); 892 } 893 894 static void xen_emergency_restart(void) 895 { 896 xen_reboot(SHUTDOWN_reboot); 897 } 898 899 static void xen_machine_halt(void) 900 { 901 xen_reboot(SHUTDOWN_poweroff); 902 } 903 904 static void xen_crash_shutdown(struct pt_regs *regs) 905 { 906 xen_reboot(SHUTDOWN_crash); 907 } 908 909 static const struct machine_ops __initdata xen_machine_ops = { 910 .restart = xen_restart, 911 .halt = xen_machine_halt, 912 .power_off = xen_machine_halt, 913 .shutdown = xen_machine_halt, 914 .crash_shutdown = xen_crash_shutdown, 915 .emergency_restart = xen_emergency_restart, 916 }; 917 918 /* First C function to be called on Xen boot */ 919 asmlinkage void __init xen_start_kernel(void) 920 { 921 pgd_t *pgd; 922 923 if (!xen_start_info) 924 return; 925 926 xen_domain_type = XEN_PV_DOMAIN; 927 928 BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0); 929 930 xen_setup_features(); 931 932 /* Install Xen paravirt ops */ 933 pv_info = xen_info; 934 pv_init_ops = xen_init_ops; 935 pv_time_ops = xen_time_ops; 936 pv_cpu_ops = xen_cpu_ops; 937 pv_apic_ops = xen_apic_ops; 938 pv_mmu_ops = xen_mmu_ops; 939 940 xen_init_irq_ops(); 941 942 xen_init_cpuid_mask(); 943 944 #ifdef CONFIG_X86_LOCAL_APIC 945 /* 946 * set up the basic apic ops. 947 */ 948 set_xen_basic_apic_ops(); 949 #endif 950 951 if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { 952 pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; 953 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; 954 } 955 956 machine_ops = xen_machine_ops; 957 958 #ifdef CONFIG_X86_64 959 /* 960 * Setup percpu state. We only need to do this for 64-bit 961 * because 32-bit already has %fs set properly. 962 */ 963 load_percpu_segment(0); 964 #endif 965 /* 966 * The only reliable way to retain the initial address of the 967 * percpu gdt_page is to remember it here, so we can go and 968 * mark it RW later, when the initial percpu area is freed. 969 */ 970 xen_initial_gdt = &per_cpu(gdt_page, 0); 971 972 xen_smp_init(); 973 974 /* Get mfn list */ 975 if (!xen_feature(XENFEAT_auto_translated_physmap)) 976 xen_build_dynamic_phys_to_machine(); 977 978 pgd = (pgd_t *)xen_start_info->pt_base; 979 980 /* Prevent unwanted bits from being set in PTEs. */ 981 __supported_pte_mask &= ~_PAGE_GLOBAL; 982 if (!xen_initial_domain()) 983 __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); 984 985 #ifdef CONFIG_X86_64 986 /* Work out if we support NX */ 987 check_efer(); 988 #endif 989 990 /* Don't do the full vcpu_info placement stuff until we have a 991 possible map and a non-dummy shared_info. */ 992 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 993 994 local_irq_disable(); 995 early_boot_irqs_off(); 996 997 xen_raw_console_write("mapping kernel into physical memory\n"); 998 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); 999 1000 init_mm.pgd = pgd; 1001 1002 /* keep using Xen gdt for now; no urgent need to change it */ 1003 1004 pv_info.kernel_rpl = 1; 1005 if (xen_feature(XENFEAT_supervisor_mode_kernel)) 1006 pv_info.kernel_rpl = 0; 1007 1008 /* set the limit of our address space */ 1009 xen_reserve_top(); 1010 1011 #ifdef CONFIG_X86_32 1012 /* set up basic CPUID stuff */ 1013 cpu_detect(&new_cpu_data); 1014 new_cpu_data.hard_math = 1; 1015 new_cpu_data.x86_capability[0] = cpuid_edx(1); 1016 #endif 1017 1018 /* Poke various useful things into boot_params */ 1019 boot_params.hdr.type_of_loader = (9 << 4) | 0; 1020 boot_params.hdr.ramdisk_image = xen_start_info->mod_start 1021 ? __pa(xen_start_info->mod_start) : 0; 1022 boot_params.hdr.ramdisk_size = xen_start_info->mod_len; 1023 boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line); 1024 1025 if (!xen_initial_domain()) { 1026 add_preferred_console("xenboot", 0, NULL); 1027 add_preferred_console("tty", 0, NULL); 1028 add_preferred_console("hvc", 0, NULL); 1029 } 1030 1031 xen_raw_console_write("about to get started...\n"); 1032 1033 /* Start the world */ 1034 #ifdef CONFIG_X86_32 1035 i386_start_kernel(); 1036 #else 1037 x86_64_start_reservations((char *)__pa_symbol(&boot_params)); 1038 #endif 1039 } 1040