1 /* 2 * Core of Xen paravirt_ops implementation. 3 * 4 * This file contains the xen_paravirt_ops structure itself, and the 5 * implementations for: 6 * - privileged instructions 7 * - interrupt flags 8 * - segment operations 9 * - booting and setup 10 * 11 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 12 */ 13 14 #include <linux/kernel.h> 15 #include <linux/init.h> 16 #include <linux/smp.h> 17 #include <linux/preempt.h> 18 #include <linux/hardirq.h> 19 #include <linux/percpu.h> 20 #include <linux/delay.h> 21 #include <linux/start_kernel.h> 22 #include <linux/sched.h> 23 #include <linux/bootmem.h> 24 #include <linux/module.h> 25 #include <linux/mm.h> 26 #include <linux/page-flags.h> 27 #include <linux/highmem.h> 28 #include <linux/smp.h> 29 30 #include <xen/interface/xen.h> 31 #include <xen/interface/physdev.h> 32 #include <xen/interface/vcpu.h> 33 #include <xen/interface/sched.h> 34 #include <xen/features.h> 35 #include <xen/page.h> 36 37 #include <asm/paravirt.h> 38 #include <asm/page.h> 39 #include <asm/xen/hypercall.h> 40 #include <asm/xen/hypervisor.h> 41 #include <asm/fixmap.h> 42 #include <asm/processor.h> 43 #include <asm/setup.h> 44 #include <asm/desc.h> 45 #include <asm/pgtable.h> 46 #include <asm/tlbflush.h> 47 #include <asm/reboot.h> 48 49 #include "xen-ops.h" 50 #include "mmu.h" 51 #include "multicalls.h" 52 53 EXPORT_SYMBOL_GPL(hypercall_page); 54 55 DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode); 56 57 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 58 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); 59 DEFINE_PER_CPU(unsigned long, xen_cr3); 60 61 struct start_info *xen_start_info; 62 EXPORT_SYMBOL_GPL(xen_start_info); 63 64 static /* __initdata */ struct shared_info dummy_shared_info; 65 66 /* 67 * Point at some empty memory to start with. We map the real shared_info 68 * page as soon as fixmap is up and running. 69 */ 70 struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info; 71 72 /* 73 * Flag to determine whether vcpu info placement is available on all 74 * VCPUs. We assume it is to start with, and then set it to zero on 75 * the first failure. This is because it can succeed on some VCPUs 76 * and not others, since it can involve hypervisor memory allocation, 77 * or because the guest failed to guarantee all the appropriate 78 * constraints on all VCPUs (ie buffer can't cross a page boundary). 79 * 80 * Note that any particular CPU may be using a placed vcpu structure, 81 * but we can only optimise if the all are. 82 * 83 * 0: not available, 1: available 84 */ 85 static int have_vcpu_info_placement = 1; 86 87 static void __init xen_vcpu_setup(int cpu) 88 { 89 struct vcpu_register_vcpu_info info; 90 int err; 91 struct vcpu_info *vcpup; 92 93 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 94 95 if (!have_vcpu_info_placement) 96 return; /* already tested, not available */ 97 98 vcpup = &per_cpu(xen_vcpu_info, cpu); 99 100 info.mfn = virt_to_mfn(vcpup); 101 info.offset = offset_in_page(vcpup); 102 103 printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %x, offset %d\n", 104 cpu, vcpup, info.mfn, info.offset); 105 106 /* Check to see if the hypervisor will put the vcpu_info 107 structure where we want it, which allows direct access via 108 a percpu-variable. */ 109 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); 110 111 if (err) { 112 printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err); 113 have_vcpu_info_placement = 0; 114 } else { 115 /* This cpu is using the registered vcpu info, even if 116 later ones fail to. */ 117 per_cpu(xen_vcpu, cpu) = vcpup; 118 119 printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n", 120 cpu, vcpup); 121 } 122 } 123 124 static void __init xen_banner(void) 125 { 126 printk(KERN_INFO "Booting paravirtualized kernel on %s\n", 127 paravirt_ops.name); 128 printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic); 129 } 130 131 static void xen_cpuid(unsigned int *eax, unsigned int *ebx, 132 unsigned int *ecx, unsigned int *edx) 133 { 134 unsigned maskedx = ~0; 135 136 /* 137 * Mask out inconvenient features, to try and disable as many 138 * unsupported kernel subsystems as possible. 139 */ 140 if (*eax == 1) 141 maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */ 142 (1 << X86_FEATURE_ACPI) | /* disable ACPI */ 143 (1 << X86_FEATURE_ACC)); /* thermal monitoring */ 144 145 asm(XEN_EMULATE_PREFIX "cpuid" 146 : "=a" (*eax), 147 "=b" (*ebx), 148 "=c" (*ecx), 149 "=d" (*edx) 150 : "0" (*eax), "2" (*ecx)); 151 *edx &= maskedx; 152 } 153 154 static void xen_set_debugreg(int reg, unsigned long val) 155 { 156 HYPERVISOR_set_debugreg(reg, val); 157 } 158 159 static unsigned long xen_get_debugreg(int reg) 160 { 161 return HYPERVISOR_get_debugreg(reg); 162 } 163 164 static unsigned long xen_save_fl(void) 165 { 166 struct vcpu_info *vcpu; 167 unsigned long flags; 168 169 vcpu = x86_read_percpu(xen_vcpu); 170 171 /* flag has opposite sense of mask */ 172 flags = !vcpu->evtchn_upcall_mask; 173 174 /* convert to IF type flag 175 -0 -> 0x00000000 176 -1 -> 0xffffffff 177 */ 178 return (-flags) & X86_EFLAGS_IF; 179 } 180 181 static void xen_restore_fl(unsigned long flags) 182 { 183 struct vcpu_info *vcpu; 184 185 /* convert from IF type flag */ 186 flags = !(flags & X86_EFLAGS_IF); 187 188 /* There's a one instruction preempt window here. We need to 189 make sure we're don't switch CPUs between getting the vcpu 190 pointer and updating the mask. */ 191 preempt_disable(); 192 vcpu = x86_read_percpu(xen_vcpu); 193 vcpu->evtchn_upcall_mask = flags; 194 preempt_enable_no_resched(); 195 196 /* Doesn't matter if we get preempted here, because any 197 pending event will get dealt with anyway. */ 198 199 if (flags == 0) { 200 preempt_check_resched(); 201 barrier(); /* unmask then check (avoid races) */ 202 if (unlikely(vcpu->evtchn_upcall_pending)) 203 force_evtchn_callback(); 204 } 205 } 206 207 static void xen_irq_disable(void) 208 { 209 /* There's a one instruction preempt window here. We need to 210 make sure we're don't switch CPUs between getting the vcpu 211 pointer and updating the mask. */ 212 preempt_disable(); 213 x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1; 214 preempt_enable_no_resched(); 215 } 216 217 static void xen_irq_enable(void) 218 { 219 struct vcpu_info *vcpu; 220 221 /* There's a one instruction preempt window here. We need to 222 make sure we're don't switch CPUs between getting the vcpu 223 pointer and updating the mask. */ 224 preempt_disable(); 225 vcpu = x86_read_percpu(xen_vcpu); 226 vcpu->evtchn_upcall_mask = 0; 227 preempt_enable_no_resched(); 228 229 /* Doesn't matter if we get preempted here, because any 230 pending event will get dealt with anyway. */ 231 232 barrier(); /* unmask then check (avoid races) */ 233 if (unlikely(vcpu->evtchn_upcall_pending)) 234 force_evtchn_callback(); 235 } 236 237 static void xen_safe_halt(void) 238 { 239 /* Blocking includes an implicit local_irq_enable(). */ 240 if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0) 241 BUG(); 242 } 243 244 static void xen_halt(void) 245 { 246 if (irqs_disabled()) 247 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 248 else 249 xen_safe_halt(); 250 } 251 252 static void xen_set_lazy_mode(enum paravirt_lazy_mode mode) 253 { 254 BUG_ON(preemptible()); 255 256 switch (mode) { 257 case PARAVIRT_LAZY_NONE: 258 BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE); 259 break; 260 261 case PARAVIRT_LAZY_MMU: 262 case PARAVIRT_LAZY_CPU: 263 BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE); 264 break; 265 266 case PARAVIRT_LAZY_FLUSH: 267 /* flush if necessary, but don't change state */ 268 if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE) 269 xen_mc_flush(); 270 return; 271 } 272 273 xen_mc_flush(); 274 x86_write_percpu(xen_lazy_mode, mode); 275 } 276 277 static unsigned long xen_store_tr(void) 278 { 279 return 0; 280 } 281 282 static void xen_set_ldt(const void *addr, unsigned entries) 283 { 284 unsigned long linear_addr = (unsigned long)addr; 285 struct mmuext_op *op; 286 struct multicall_space mcs = xen_mc_entry(sizeof(*op)); 287 288 op = mcs.args; 289 op->cmd = MMUEXT_SET_LDT; 290 if (linear_addr) { 291 /* ldt my be vmalloced, use arbitrary_virt_to_machine */ 292 xmaddr_t maddr; 293 maddr = arbitrary_virt_to_machine((unsigned long)addr); 294 linear_addr = (unsigned long)maddr.maddr; 295 } 296 op->arg1.linear_addr = linear_addr; 297 op->arg2.nr_ents = entries; 298 299 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 300 301 xen_mc_issue(PARAVIRT_LAZY_CPU); 302 } 303 304 static void xen_load_gdt(const struct Xgt_desc_struct *dtr) 305 { 306 unsigned long *frames; 307 unsigned long va = dtr->address; 308 unsigned int size = dtr->size + 1; 309 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 310 int f; 311 struct multicall_space mcs; 312 313 /* A GDT can be up to 64k in size, which corresponds to 8192 314 8-byte entries, or 16 4k pages.. */ 315 316 BUG_ON(size > 65536); 317 BUG_ON(va & ~PAGE_MASK); 318 319 mcs = xen_mc_entry(sizeof(*frames) * pages); 320 frames = mcs.args; 321 322 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 323 frames[f] = virt_to_mfn(va); 324 make_lowmem_page_readonly((void *)va); 325 } 326 327 MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); 328 329 xen_mc_issue(PARAVIRT_LAZY_CPU); 330 } 331 332 static void load_TLS_descriptor(struct thread_struct *t, 333 unsigned int cpu, unsigned int i) 334 { 335 struct desc_struct *gdt = get_cpu_gdt_table(cpu); 336 xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); 337 struct multicall_space mc = __xen_mc_entry(0); 338 339 MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]); 340 } 341 342 static void xen_load_tls(struct thread_struct *t, unsigned int cpu) 343 { 344 xen_mc_batch(); 345 346 load_TLS_descriptor(t, cpu, 0); 347 load_TLS_descriptor(t, cpu, 1); 348 load_TLS_descriptor(t, cpu, 2); 349 350 xen_mc_issue(PARAVIRT_LAZY_CPU); 351 352 /* 353 * XXX sleazy hack: If we're being called in a lazy-cpu zone, 354 * it means we're in a context switch, and %gs has just been 355 * saved. This means we can zero it out to prevent faults on 356 * exit from the hypervisor if the next process has no %gs. 357 * Either way, it has been saved, and the new value will get 358 * loaded properly. This will go away as soon as Xen has been 359 * modified to not save/restore %gs for normal hypercalls. 360 */ 361 if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU) 362 loadsegment(gs, 0); 363 } 364 365 static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, 366 u32 low, u32 high) 367 { 368 unsigned long lp = (unsigned long)&dt[entrynum]; 369 xmaddr_t mach_lp = virt_to_machine(lp); 370 u64 entry = (u64)high << 32 | low; 371 372 preempt_disable(); 373 374 xen_mc_flush(); 375 if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry)) 376 BUG(); 377 378 preempt_enable(); 379 } 380 381 static int cvt_gate_to_trap(int vector, u32 low, u32 high, 382 struct trap_info *info) 383 { 384 u8 type, dpl; 385 386 type = (high >> 8) & 0x1f; 387 dpl = (high >> 13) & 3; 388 389 if (type != 0xf && type != 0xe) 390 return 0; 391 392 info->vector = vector; 393 info->address = (high & 0xffff0000) | (low & 0x0000ffff); 394 info->cs = low >> 16; 395 info->flags = dpl; 396 /* interrupt gates clear IF */ 397 if (type == 0xe) 398 info->flags |= 4; 399 400 return 1; 401 } 402 403 /* Locations of each CPU's IDT */ 404 static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc); 405 406 /* Set an IDT entry. If the entry is part of the current IDT, then 407 also update Xen. */ 408 static void xen_write_idt_entry(struct desc_struct *dt, int entrynum, 409 u32 low, u32 high) 410 { 411 unsigned long p = (unsigned long)&dt[entrynum]; 412 unsigned long start, end; 413 414 preempt_disable(); 415 416 start = __get_cpu_var(idt_desc).address; 417 end = start + __get_cpu_var(idt_desc).size + 1; 418 419 xen_mc_flush(); 420 421 write_dt_entry(dt, entrynum, low, high); 422 423 if (p >= start && (p + 8) <= end) { 424 struct trap_info info[2]; 425 426 info[1].address = 0; 427 428 if (cvt_gate_to_trap(entrynum, low, high, &info[0])) 429 if (HYPERVISOR_set_trap_table(info)) 430 BUG(); 431 } 432 433 preempt_enable(); 434 } 435 436 static void xen_convert_trap_info(const struct Xgt_desc_struct *desc, 437 struct trap_info *traps) 438 { 439 unsigned in, out, count; 440 441 count = (desc->size+1) / 8; 442 BUG_ON(count > 256); 443 444 for (in = out = 0; in < count; in++) { 445 const u32 *entry = (u32 *)(desc->address + in * 8); 446 447 if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out])) 448 out++; 449 } 450 traps[out].address = 0; 451 } 452 453 void xen_copy_trap_info(struct trap_info *traps) 454 { 455 const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc); 456 457 xen_convert_trap_info(desc, traps); 458 } 459 460 /* Load a new IDT into Xen. In principle this can be per-CPU, so we 461 hold a spinlock to protect the static traps[] array (static because 462 it avoids allocation, and saves stack space). */ 463 static void xen_load_idt(const struct Xgt_desc_struct *desc) 464 { 465 static DEFINE_SPINLOCK(lock); 466 static struct trap_info traps[257]; 467 468 spin_lock(&lock); 469 470 __get_cpu_var(idt_desc) = *desc; 471 472 xen_convert_trap_info(desc, traps); 473 474 xen_mc_flush(); 475 if (HYPERVISOR_set_trap_table(traps)) 476 BUG(); 477 478 spin_unlock(&lock); 479 } 480 481 /* Write a GDT descriptor entry. Ignore LDT descriptors, since 482 they're handled differently. */ 483 static void xen_write_gdt_entry(struct desc_struct *dt, int entry, 484 u32 low, u32 high) 485 { 486 preempt_disable(); 487 488 switch ((high >> 8) & 0xff) { 489 case DESCTYPE_LDT: 490 case DESCTYPE_TSS: 491 /* ignore */ 492 break; 493 494 default: { 495 xmaddr_t maddr = virt_to_machine(&dt[entry]); 496 u64 desc = (u64)high << 32 | low; 497 498 xen_mc_flush(); 499 if (HYPERVISOR_update_descriptor(maddr.maddr, desc)) 500 BUG(); 501 } 502 503 } 504 505 preempt_enable(); 506 } 507 508 static void xen_load_esp0(struct tss_struct *tss, 509 struct thread_struct *thread) 510 { 511 struct multicall_space mcs = xen_mc_entry(0); 512 MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0); 513 xen_mc_issue(PARAVIRT_LAZY_CPU); 514 } 515 516 static void xen_set_iopl_mask(unsigned mask) 517 { 518 struct physdev_set_iopl set_iopl; 519 520 /* Force the change at ring 0. */ 521 set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3; 522 HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); 523 } 524 525 static void xen_io_delay(void) 526 { 527 } 528 529 #ifdef CONFIG_X86_LOCAL_APIC 530 static unsigned long xen_apic_read(unsigned long reg) 531 { 532 return 0; 533 } 534 535 static void xen_apic_write(unsigned long reg, unsigned long val) 536 { 537 /* Warn to see if there's any stray references */ 538 WARN_ON(1); 539 } 540 #endif 541 542 static void xen_flush_tlb(void) 543 { 544 struct mmuext_op *op; 545 struct multicall_space mcs = xen_mc_entry(sizeof(*op)); 546 547 op = mcs.args; 548 op->cmd = MMUEXT_TLB_FLUSH_LOCAL; 549 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 550 551 xen_mc_issue(PARAVIRT_LAZY_MMU); 552 } 553 554 static void xen_flush_tlb_single(unsigned long addr) 555 { 556 struct mmuext_op *op; 557 struct multicall_space mcs = xen_mc_entry(sizeof(*op)); 558 559 op = mcs.args; 560 op->cmd = MMUEXT_INVLPG_LOCAL; 561 op->arg1.linear_addr = addr & PAGE_MASK; 562 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 563 564 xen_mc_issue(PARAVIRT_LAZY_MMU); 565 } 566 567 static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm, 568 unsigned long va) 569 { 570 struct { 571 struct mmuext_op op; 572 cpumask_t mask; 573 } *args; 574 cpumask_t cpumask = *cpus; 575 struct multicall_space mcs; 576 577 /* 578 * A couple of (to be removed) sanity checks: 579 * 580 * - current CPU must not be in mask 581 * - mask must exist :) 582 */ 583 BUG_ON(cpus_empty(cpumask)); 584 BUG_ON(cpu_isset(smp_processor_id(), cpumask)); 585 BUG_ON(!mm); 586 587 /* If a CPU which we ran on has gone down, OK. */ 588 cpus_and(cpumask, cpumask, cpu_online_map); 589 if (cpus_empty(cpumask)) 590 return; 591 592 mcs = xen_mc_entry(sizeof(*args)); 593 args = mcs.args; 594 args->mask = cpumask; 595 args->op.arg2.vcpumask = &args->mask; 596 597 if (va == TLB_FLUSH_ALL) { 598 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; 599 } else { 600 args->op.cmd = MMUEXT_INVLPG_MULTI; 601 args->op.arg1.linear_addr = va; 602 } 603 604 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); 605 606 xen_mc_issue(PARAVIRT_LAZY_MMU); 607 } 608 609 static void xen_write_cr2(unsigned long cr2) 610 { 611 x86_read_percpu(xen_vcpu)->arch.cr2 = cr2; 612 } 613 614 static unsigned long xen_read_cr2(void) 615 { 616 return x86_read_percpu(xen_vcpu)->arch.cr2; 617 } 618 619 static unsigned long xen_read_cr2_direct(void) 620 { 621 return x86_read_percpu(xen_vcpu_info.arch.cr2); 622 } 623 624 static void xen_write_cr4(unsigned long cr4) 625 { 626 /* Just ignore cr4 changes; Xen doesn't allow us to do 627 anything anyway. */ 628 } 629 630 static unsigned long xen_read_cr3(void) 631 { 632 return x86_read_percpu(xen_cr3); 633 } 634 635 static void xen_write_cr3(unsigned long cr3) 636 { 637 BUG_ON(preemptible()); 638 639 if (cr3 == x86_read_percpu(xen_cr3)) { 640 /* just a simple tlb flush */ 641 xen_flush_tlb(); 642 return; 643 } 644 645 x86_write_percpu(xen_cr3, cr3); 646 647 648 { 649 struct mmuext_op *op; 650 struct multicall_space mcs = xen_mc_entry(sizeof(*op)); 651 unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3)); 652 653 op = mcs.args; 654 op->cmd = MMUEXT_NEW_BASEPTR; 655 op->arg1.mfn = mfn; 656 657 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 658 659 xen_mc_issue(PARAVIRT_LAZY_CPU); 660 } 661 } 662 663 /* Early in boot, while setting up the initial pagetable, assume 664 everything is pinned. */ 665 static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn) 666 { 667 BUG_ON(mem_map); /* should only be used early */ 668 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 669 } 670 671 /* This needs to make sure the new pte page is pinned iff its being 672 attached to a pinned pagetable. */ 673 static void xen_alloc_pt(struct mm_struct *mm, u32 pfn) 674 { 675 struct page *page = pfn_to_page(pfn); 676 677 if (PagePinned(virt_to_page(mm->pgd))) { 678 SetPagePinned(page); 679 680 if (!PageHighMem(page)) 681 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 682 else 683 /* make sure there are no stray mappings of 684 this page */ 685 kmap_flush_unused(); 686 } 687 } 688 689 /* This should never happen until we're OK to use struct page */ 690 static void xen_release_pt(u32 pfn) 691 { 692 struct page *page = pfn_to_page(pfn); 693 694 if (PagePinned(page)) { 695 if (!PageHighMem(page)) 696 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 697 } 698 } 699 700 #ifdef CONFIG_HIGHPTE 701 static void *xen_kmap_atomic_pte(struct page *page, enum km_type type) 702 { 703 pgprot_t prot = PAGE_KERNEL; 704 705 if (PagePinned(page)) 706 prot = PAGE_KERNEL_RO; 707 708 if (0 && PageHighMem(page)) 709 printk("mapping highpte %lx type %d prot %s\n", 710 page_to_pfn(page), type, 711 (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ"); 712 713 return kmap_atomic_prot(page, type, prot); 714 } 715 #endif 716 717 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) 718 { 719 /* If there's an existing pte, then don't allow _PAGE_RW to be set */ 720 if (pte_val_ma(*ptep) & _PAGE_PRESENT) 721 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & 722 pte_val_ma(pte)); 723 724 return pte; 725 } 726 727 /* Init-time set_pte while constructing initial pagetables, which 728 doesn't allow RO pagetable pages to be remapped RW */ 729 static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) 730 { 731 pte = mask_rw_pte(ptep, pte); 732 733 xen_set_pte(ptep, pte); 734 } 735 736 static __init void xen_pagetable_setup_start(pgd_t *base) 737 { 738 pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base; 739 740 /* special set_pte for pagetable initialization */ 741 paravirt_ops.set_pte = xen_set_pte_init; 742 743 init_mm.pgd = base; 744 /* 745 * copy top-level of Xen-supplied pagetable into place. For 746 * !PAE we can use this as-is, but for PAE it is a stand-in 747 * while we copy the pmd pages. 748 */ 749 memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t)); 750 751 if (PTRS_PER_PMD > 1) { 752 int i; 753 /* 754 * For PAE, need to allocate new pmds, rather than 755 * share Xen's, since Xen doesn't like pmd's being 756 * shared between address spaces. 757 */ 758 for (i = 0; i < PTRS_PER_PGD; i++) { 759 if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) { 760 pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE); 761 762 memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]), 763 PAGE_SIZE); 764 765 make_lowmem_page_readonly(pmd); 766 767 set_pgd(&base[i], __pgd(1 + __pa(pmd))); 768 } else 769 pgd_clear(&base[i]); 770 } 771 } 772 773 /* make sure zero_page is mapped RO so we can use it in pagetables */ 774 make_lowmem_page_readonly(empty_zero_page); 775 make_lowmem_page_readonly(base); 776 /* 777 * Switch to new pagetable. This is done before 778 * pagetable_init has done anything so that the new pages 779 * added to the table can be prepared properly for Xen. 780 */ 781 xen_write_cr3(__pa(base)); 782 } 783 784 static __init void xen_pagetable_setup_done(pgd_t *base) 785 { 786 /* This will work as long as patching hasn't happened yet 787 (which it hasn't) */ 788 paravirt_ops.alloc_pt = xen_alloc_pt; 789 paravirt_ops.set_pte = xen_set_pte; 790 791 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 792 /* 793 * Create a mapping for the shared info page. 794 * Should be set_fixmap(), but shared_info is a machine 795 * address with no corresponding pseudo-phys address. 796 */ 797 set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP), 798 PFN_DOWN(xen_start_info->shared_info), 799 PAGE_KERNEL); 800 801 HYPERVISOR_shared_info = 802 (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); 803 804 } else 805 HYPERVISOR_shared_info = 806 (struct shared_info *)__va(xen_start_info->shared_info); 807 808 /* Actually pin the pagetable down, but we can't set PG_pinned 809 yet because the page structures don't exist yet. */ 810 { 811 struct mmuext_op op; 812 #ifdef CONFIG_X86_PAE 813 op.cmd = MMUEXT_PIN_L3_TABLE; 814 #else 815 op.cmd = MMUEXT_PIN_L3_TABLE; 816 #endif 817 op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(base))); 818 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) 819 BUG(); 820 } 821 } 822 823 /* This is called once we have the cpu_possible_map */ 824 void __init xen_setup_vcpu_info_placement(void) 825 { 826 int cpu; 827 828 for_each_possible_cpu(cpu) 829 xen_vcpu_setup(cpu); 830 831 /* xen_vcpu_setup managed to place the vcpu_info within the 832 percpu area for all cpus, so make use of it */ 833 if (have_vcpu_info_placement) { 834 printk(KERN_INFO "Xen: using vcpu_info placement\n"); 835 836 paravirt_ops.save_fl = xen_save_fl_direct; 837 paravirt_ops.restore_fl = xen_restore_fl_direct; 838 paravirt_ops.irq_disable = xen_irq_disable_direct; 839 paravirt_ops.irq_enable = xen_irq_enable_direct; 840 paravirt_ops.read_cr2 = xen_read_cr2_direct; 841 paravirt_ops.iret = xen_iret_direct; 842 } 843 } 844 845 static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf, 846 unsigned long addr, unsigned len) 847 { 848 char *start, *end, *reloc; 849 unsigned ret; 850 851 start = end = reloc = NULL; 852 853 #define SITE(x) \ 854 case PARAVIRT_PATCH(x): \ 855 if (have_vcpu_info_placement) { \ 856 start = (char *)xen_##x##_direct; \ 857 end = xen_##x##_direct_end; \ 858 reloc = xen_##x##_direct_reloc; \ 859 } \ 860 goto patch_site 861 862 switch (type) { 863 SITE(irq_enable); 864 SITE(irq_disable); 865 SITE(save_fl); 866 SITE(restore_fl); 867 #undef SITE 868 869 patch_site: 870 if (start == NULL || (end-start) > len) 871 goto default_patch; 872 873 ret = paravirt_patch_insns(insnbuf, len, start, end); 874 875 /* Note: because reloc is assigned from something that 876 appears to be an array, gcc assumes it's non-null, 877 but doesn't know its relationship with start and 878 end. */ 879 if (reloc > start && reloc < end) { 880 int reloc_off = reloc - start; 881 long *relocp = (long *)(insnbuf + reloc_off); 882 long delta = start - (char *)addr; 883 884 *relocp += delta; 885 } 886 break; 887 888 default_patch: 889 default: 890 ret = paravirt_patch_default(type, clobbers, insnbuf, 891 addr, len); 892 break; 893 } 894 895 return ret; 896 } 897 898 static const struct paravirt_ops xen_paravirt_ops __initdata = { 899 .paravirt_enabled = 1, 900 .shared_kernel_pmd = 0, 901 902 .name = "Xen", 903 .banner = xen_banner, 904 905 .patch = xen_patch, 906 907 .memory_setup = xen_memory_setup, 908 .arch_setup = xen_arch_setup, 909 .init_IRQ = xen_init_IRQ, 910 .post_allocator_init = xen_mark_init_mm_pinned, 911 912 .time_init = xen_time_init, 913 .set_wallclock = xen_set_wallclock, 914 .get_wallclock = xen_get_wallclock, 915 .get_cpu_khz = xen_cpu_khz, 916 .sched_clock = xen_sched_clock, 917 918 .cpuid = xen_cpuid, 919 920 .set_debugreg = xen_set_debugreg, 921 .get_debugreg = xen_get_debugreg, 922 923 .clts = native_clts, 924 925 .read_cr0 = native_read_cr0, 926 .write_cr0 = native_write_cr0, 927 928 .read_cr2 = xen_read_cr2, 929 .write_cr2 = xen_write_cr2, 930 931 .read_cr3 = xen_read_cr3, 932 .write_cr3 = xen_write_cr3, 933 934 .read_cr4 = native_read_cr4, 935 .read_cr4_safe = native_read_cr4_safe, 936 .write_cr4 = xen_write_cr4, 937 938 .save_fl = xen_save_fl, 939 .restore_fl = xen_restore_fl, 940 .irq_disable = xen_irq_disable, 941 .irq_enable = xen_irq_enable, 942 .safe_halt = xen_safe_halt, 943 .halt = xen_halt, 944 .wbinvd = native_wbinvd, 945 946 .read_msr = native_read_msr_safe, 947 .write_msr = native_write_msr_safe, 948 .read_tsc = native_read_tsc, 949 .read_pmc = native_read_pmc, 950 951 .iret = (void *)&hypercall_page[__HYPERVISOR_iret], 952 .irq_enable_sysexit = NULL, /* never called */ 953 954 .load_tr_desc = paravirt_nop, 955 .set_ldt = xen_set_ldt, 956 .load_gdt = xen_load_gdt, 957 .load_idt = xen_load_idt, 958 .load_tls = xen_load_tls, 959 960 .store_gdt = native_store_gdt, 961 .store_idt = native_store_idt, 962 .store_tr = xen_store_tr, 963 964 .write_ldt_entry = xen_write_ldt_entry, 965 .write_gdt_entry = xen_write_gdt_entry, 966 .write_idt_entry = xen_write_idt_entry, 967 .load_esp0 = xen_load_esp0, 968 969 .set_iopl_mask = xen_set_iopl_mask, 970 .io_delay = xen_io_delay, 971 972 #ifdef CONFIG_X86_LOCAL_APIC 973 .apic_write = xen_apic_write, 974 .apic_write_atomic = xen_apic_write, 975 .apic_read = xen_apic_read, 976 .setup_boot_clock = paravirt_nop, 977 .setup_secondary_clock = paravirt_nop, 978 .startup_ipi_hook = paravirt_nop, 979 #endif 980 981 .flush_tlb_user = xen_flush_tlb, 982 .flush_tlb_kernel = xen_flush_tlb, 983 .flush_tlb_single = xen_flush_tlb_single, 984 .flush_tlb_others = xen_flush_tlb_others, 985 986 .pte_update = paravirt_nop, 987 .pte_update_defer = paravirt_nop, 988 989 .pagetable_setup_start = xen_pagetable_setup_start, 990 .pagetable_setup_done = xen_pagetable_setup_done, 991 992 .alloc_pt = xen_alloc_pt_init, 993 .release_pt = xen_release_pt, 994 .alloc_pd = paravirt_nop, 995 .alloc_pd_clone = paravirt_nop, 996 .release_pd = paravirt_nop, 997 998 #ifdef CONFIG_HIGHPTE 999 .kmap_atomic_pte = xen_kmap_atomic_pte, 1000 #endif 1001 1002 .set_pte = NULL, /* see xen_pagetable_setup_* */ 1003 .set_pte_at = xen_set_pte_at, 1004 .set_pmd = xen_set_pmd, 1005 1006 .pte_val = xen_pte_val, 1007 .pgd_val = xen_pgd_val, 1008 1009 .make_pte = xen_make_pte, 1010 .make_pgd = xen_make_pgd, 1011 1012 #ifdef CONFIG_X86_PAE 1013 .set_pte_atomic = xen_set_pte_atomic, 1014 .set_pte_present = xen_set_pte_at, 1015 .set_pud = xen_set_pud, 1016 .pte_clear = xen_pte_clear, 1017 .pmd_clear = xen_pmd_clear, 1018 1019 .make_pmd = xen_make_pmd, 1020 .pmd_val = xen_pmd_val, 1021 #endif /* PAE */ 1022 1023 .activate_mm = xen_activate_mm, 1024 .dup_mmap = xen_dup_mmap, 1025 .exit_mmap = xen_exit_mmap, 1026 1027 .set_lazy_mode = xen_set_lazy_mode, 1028 }; 1029 1030 #ifdef CONFIG_SMP 1031 static const struct smp_ops xen_smp_ops __initdata = { 1032 .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, 1033 .smp_prepare_cpus = xen_smp_prepare_cpus, 1034 .cpu_up = xen_cpu_up, 1035 .smp_cpus_done = xen_smp_cpus_done, 1036 1037 .smp_send_stop = xen_smp_send_stop, 1038 .smp_send_reschedule = xen_smp_send_reschedule, 1039 .smp_call_function_mask = xen_smp_call_function_mask, 1040 }; 1041 #endif /* CONFIG_SMP */ 1042 1043 static void xen_reboot(int reason) 1044 { 1045 #ifdef CONFIG_SMP 1046 smp_send_stop(); 1047 #endif 1048 1049 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason)) 1050 BUG(); 1051 } 1052 1053 static void xen_restart(char *msg) 1054 { 1055 xen_reboot(SHUTDOWN_reboot); 1056 } 1057 1058 static void xen_emergency_restart(void) 1059 { 1060 xen_reboot(SHUTDOWN_reboot); 1061 } 1062 1063 static void xen_machine_halt(void) 1064 { 1065 xen_reboot(SHUTDOWN_poweroff); 1066 } 1067 1068 static void xen_crash_shutdown(struct pt_regs *regs) 1069 { 1070 xen_reboot(SHUTDOWN_crash); 1071 } 1072 1073 static const struct machine_ops __initdata xen_machine_ops = { 1074 .restart = xen_restart, 1075 .halt = xen_machine_halt, 1076 .power_off = xen_machine_halt, 1077 .shutdown = xen_machine_halt, 1078 .crash_shutdown = xen_crash_shutdown, 1079 .emergency_restart = xen_emergency_restart, 1080 }; 1081 1082 1083 /* First C function to be called on Xen boot */ 1084 asmlinkage void __init xen_start_kernel(void) 1085 { 1086 pgd_t *pgd; 1087 1088 if (!xen_start_info) 1089 return; 1090 1091 BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0); 1092 1093 /* Install Xen paravirt ops */ 1094 paravirt_ops = xen_paravirt_ops; 1095 machine_ops = xen_machine_ops; 1096 1097 #ifdef CONFIG_SMP 1098 smp_ops = xen_smp_ops; 1099 #endif 1100 1101 xen_setup_features(); 1102 1103 /* Get mfn list */ 1104 if (!xen_feature(XENFEAT_auto_translated_physmap)) 1105 phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list; 1106 1107 pgd = (pgd_t *)xen_start_info->pt_base; 1108 1109 init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE; 1110 1111 init_mm.pgd = pgd; /* use the Xen pagetables to start */ 1112 1113 /* keep using Xen gdt for now; no urgent need to change it */ 1114 1115 x86_write_percpu(xen_cr3, __pa(pgd)); 1116 1117 #ifdef CONFIG_SMP 1118 /* Don't do the full vcpu_info placement stuff until we have a 1119 possible map. */ 1120 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 1121 #else 1122 /* May as well do it now, since there's no good time to call 1123 it later on UP. */ 1124 xen_setup_vcpu_info_placement(); 1125 #endif 1126 1127 paravirt_ops.kernel_rpl = 1; 1128 if (xen_feature(XENFEAT_supervisor_mode_kernel)) 1129 paravirt_ops.kernel_rpl = 0; 1130 1131 /* set the limit of our address space */ 1132 reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE); 1133 1134 /* set up basic CPUID stuff */ 1135 cpu_detect(&new_cpu_data); 1136 new_cpu_data.hard_math = 1; 1137 new_cpu_data.x86_capability[0] = cpuid_edx(1); 1138 1139 /* Poke various useful things into boot_params */ 1140 LOADER_TYPE = (9 << 4) | 0; 1141 INITRD_START = xen_start_info->mod_start ? __pa(xen_start_info->mod_start) : 0; 1142 INITRD_SIZE = xen_start_info->mod_len; 1143 1144 /* Start the world */ 1145 start_kernel(); 1146 } 1147