1 /* 2 * Core of Xen paravirt_ops implementation. 3 * 4 * This file contains the xen_paravirt_ops structure itself, and the 5 * implementations for: 6 * - privileged instructions 7 * - interrupt flags 8 * - segment operations 9 * - booting and setup 10 * 11 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 12 */ 13 14 #include <linux/cpu.h> 15 #include <linux/kernel.h> 16 #include <linux/init.h> 17 #include <linux/smp.h> 18 #include <linux/preempt.h> 19 #include <linux/hardirq.h> 20 #include <linux/percpu.h> 21 #include <linux/delay.h> 22 #include <linux/start_kernel.h> 23 #include <linux/sched.h> 24 #include <linux/kprobes.h> 25 #include <linux/bootmem.h> 26 #include <linux/module.h> 27 #include <linux/mm.h> 28 #include <linux/page-flags.h> 29 #include <linux/highmem.h> 30 #include <linux/console.h> 31 #include <linux/pci.h> 32 #include <linux/gfp.h> 33 #include <linux/memblock.h> 34 #include <linux/edd.h> 35 #include <linux/frame.h> 36 37 #ifdef CONFIG_KEXEC_CORE 38 #include <linux/kexec.h> 39 #endif 40 41 #include <xen/xen.h> 42 #include <xen/events.h> 43 #include <xen/interface/xen.h> 44 #include <xen/interface/version.h> 45 #include <xen/interface/physdev.h> 46 #include <xen/interface/vcpu.h> 47 #include <xen/interface/memory.h> 48 #include <xen/interface/nmi.h> 49 #include <xen/interface/xen-mca.h> 50 #include <xen/features.h> 51 #include <xen/page.h> 52 #include <xen/hvm.h> 53 #include <xen/hvc-console.h> 54 #include <xen/acpi.h> 55 56 #include <asm/paravirt.h> 57 #include <asm/apic.h> 58 #include <asm/page.h> 59 #include <asm/xen/pci.h> 60 #include <asm/xen/hypercall.h> 61 #include <asm/xen/hypervisor.h> 62 #include <asm/fixmap.h> 63 #include <asm/processor.h> 64 #include <asm/proto.h> 65 #include <asm/msr-index.h> 66 #include <asm/traps.h> 67 #include <asm/setup.h> 68 #include <asm/desc.h> 69 #include <asm/pgalloc.h> 70 #include <asm/pgtable.h> 71 #include <asm/tlbflush.h> 72 #include <asm/reboot.h> 73 #include <asm/stackprotector.h> 74 #include <asm/hypervisor.h> 75 #include <asm/mach_traps.h> 76 #include <asm/mwait.h> 77 #include <asm/pci_x86.h> 78 #include <asm/pat.h> 79 #include <asm/cpu.h> 80 81 #ifdef CONFIG_ACPI 82 #include <linux/acpi.h> 83 #include <asm/acpi.h> 84 #include <acpi/pdc_intel.h> 85 #include <acpi/processor.h> 86 #include <xen/interface/platform.h> 87 #endif 88 89 #include "xen-ops.h" 90 #include "mmu.h" 91 #include "smp.h" 92 #include "multicalls.h" 93 #include "pmu.h" 94 95 EXPORT_SYMBOL_GPL(hypercall_page); 96 97 /* 98 * Pointer to the xen_vcpu_info structure or 99 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info 100 * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info 101 * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point 102 * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to 103 * acknowledge pending events. 104 * Also more subtly it is used by the patched version of irq enable/disable 105 * e.g. xen_irq_enable_direct and xen_iret in PV mode. 106 * 107 * The desire to be able to do those mask/unmask operations as a single 108 * instruction by using the per-cpu offset held in %gs is the real reason 109 * vcpu info is in a per-cpu pointer and the original reason for this 110 * hypercall. 111 * 112 */ 113 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 114 115 /* 116 * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info 117 * hypercall. This can be used both in PV and PVHVM mode. The structure 118 * overrides the default per_cpu(xen_vcpu, cpu) value. 119 */ 120 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); 121 122 enum xen_domain_type xen_domain_type = XEN_NATIVE; 123 EXPORT_SYMBOL_GPL(xen_domain_type); 124 125 unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; 126 EXPORT_SYMBOL(machine_to_phys_mapping); 127 unsigned long machine_to_phys_nr; 128 EXPORT_SYMBOL(machine_to_phys_nr); 129 130 struct start_info *xen_start_info; 131 EXPORT_SYMBOL_GPL(xen_start_info); 132 133 struct shared_info xen_dummy_shared_info; 134 135 void *xen_initial_gdt; 136 137 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); 138 __read_mostly int xen_have_vector_callback; 139 EXPORT_SYMBOL_GPL(xen_have_vector_callback); 140 141 /* 142 * Point at some empty memory to start with. We map the real shared_info 143 * page as soon as fixmap is up and running. 144 */ 145 struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info; 146 147 /* 148 * Flag to determine whether vcpu info placement is available on all 149 * VCPUs. We assume it is to start with, and then set it to zero on 150 * the first failure. This is because it can succeed on some VCPUs 151 * and not others, since it can involve hypervisor memory allocation, 152 * or because the guest failed to guarantee all the appropriate 153 * constraints on all VCPUs (ie buffer can't cross a page boundary). 154 * 155 * Note that any particular CPU may be using a placed vcpu structure, 156 * but we can only optimise if the all are. 157 * 158 * 0: not available, 1: available 159 */ 160 static int have_vcpu_info_placement = 1; 161 162 struct tls_descs { 163 struct desc_struct desc[3]; 164 }; 165 166 /* 167 * Updating the 3 TLS descriptors in the GDT on every task switch is 168 * surprisingly expensive so we avoid updating them if they haven't 169 * changed. Since Xen writes different descriptors than the one 170 * passed in the update_descriptor hypercall we keep shadow copies to 171 * compare against. 172 */ 173 static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc); 174 175 static void clamp_max_cpus(void) 176 { 177 #ifdef CONFIG_SMP 178 if (setup_max_cpus > MAX_VIRT_CPUS) 179 setup_max_cpus = MAX_VIRT_CPUS; 180 #endif 181 } 182 183 static void xen_vcpu_setup(int cpu) 184 { 185 struct vcpu_register_vcpu_info info; 186 int err; 187 struct vcpu_info *vcpup; 188 189 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 190 191 /* 192 * This path is called twice on PVHVM - first during bootup via 193 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being 194 * hotplugged: cpu_up -> xen_hvm_cpu_notify. 195 * As we can only do the VCPUOP_register_vcpu_info once lets 196 * not over-write its result. 197 * 198 * For PV it is called during restore (xen_vcpu_restore) and bootup 199 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not 200 * use this function. 201 */ 202 if (xen_hvm_domain()) { 203 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) 204 return; 205 } 206 if (cpu < MAX_VIRT_CPUS) 207 per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 208 209 if (!have_vcpu_info_placement) { 210 if (cpu >= MAX_VIRT_CPUS) 211 clamp_max_cpus(); 212 return; 213 } 214 215 vcpup = &per_cpu(xen_vcpu_info, cpu); 216 info.mfn = arbitrary_virt_to_mfn(vcpup); 217 info.offset = offset_in_page(vcpup); 218 219 /* Check to see if the hypervisor will put the vcpu_info 220 structure where we want it, which allows direct access via 221 a percpu-variable. 222 N.B. This hypercall can _only_ be called once per CPU. Subsequent 223 calls will error out with -EINVAL. This is due to the fact that 224 hypervisor has no unregister variant and this hypercall does not 225 allow to over-write info.mfn and info.offset. 226 */ 227 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); 228 229 if (err) { 230 printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err); 231 have_vcpu_info_placement = 0; 232 clamp_max_cpus(); 233 } else { 234 /* This cpu is using the registered vcpu info, even if 235 later ones fail to. */ 236 per_cpu(xen_vcpu, cpu) = vcpup; 237 } 238 } 239 240 /* 241 * On restore, set the vcpu placement up again. 242 * If it fails, then we're in a bad state, since 243 * we can't back out from using it... 244 */ 245 void xen_vcpu_restore(void) 246 { 247 int cpu; 248 249 for_each_possible_cpu(cpu) { 250 bool other_cpu = (cpu != smp_processor_id()); 251 bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL); 252 253 if (other_cpu && is_up && 254 HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) 255 BUG(); 256 257 xen_setup_runstate_info(cpu); 258 259 if (have_vcpu_info_placement) 260 xen_vcpu_setup(cpu); 261 262 if (other_cpu && is_up && 263 HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) 264 BUG(); 265 } 266 } 267 268 static void __init xen_banner(void) 269 { 270 unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL); 271 struct xen_extraversion extra; 272 HYPERVISOR_xen_version(XENVER_extraversion, &extra); 273 274 pr_info("Booting paravirtualized kernel %son %s\n", 275 xen_feature(XENFEAT_auto_translated_physmap) ? 276 "with PVH extensions " : "", pv_info.name); 277 printk(KERN_INFO "Xen version: %d.%d%s%s\n", 278 version >> 16, version & 0xffff, extra.extraversion, 279 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 280 } 281 /* Check if running on Xen version (major, minor) or later */ 282 bool 283 xen_running_on_version_or_later(unsigned int major, unsigned int minor) 284 { 285 unsigned int version; 286 287 if (!xen_domain()) 288 return false; 289 290 version = HYPERVISOR_xen_version(XENVER_version, NULL); 291 if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) || 292 ((version >> 16) > major)) 293 return true; 294 return false; 295 } 296 297 #define CPUID_THERM_POWER_LEAF 6 298 #define APERFMPERF_PRESENT 0 299 300 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; 301 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; 302 303 static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask; 304 static __read_mostly unsigned int cpuid_leaf5_ecx_val; 305 static __read_mostly unsigned int cpuid_leaf5_edx_val; 306 307 static void xen_cpuid(unsigned int *ax, unsigned int *bx, 308 unsigned int *cx, unsigned int *dx) 309 { 310 unsigned maskebx = ~0; 311 unsigned maskecx = ~0; 312 unsigned maskedx = ~0; 313 unsigned setecx = 0; 314 /* 315 * Mask out inconvenient features, to try and disable as many 316 * unsupported kernel subsystems as possible. 317 */ 318 switch (*ax) { 319 case 1: 320 maskecx = cpuid_leaf1_ecx_mask; 321 setecx = cpuid_leaf1_ecx_set_mask; 322 maskedx = cpuid_leaf1_edx_mask; 323 break; 324 325 case CPUID_MWAIT_LEAF: 326 /* Synthesize the values.. */ 327 *ax = 0; 328 *bx = 0; 329 *cx = cpuid_leaf5_ecx_val; 330 *dx = cpuid_leaf5_edx_val; 331 return; 332 333 case CPUID_THERM_POWER_LEAF: 334 /* Disabling APERFMPERF for kernel usage */ 335 maskecx = ~(1 << APERFMPERF_PRESENT); 336 break; 337 338 case 0xb: 339 /* Suppress extended topology stuff */ 340 maskebx = 0; 341 break; 342 } 343 344 asm(XEN_EMULATE_PREFIX "cpuid" 345 : "=a" (*ax), 346 "=b" (*bx), 347 "=c" (*cx), 348 "=d" (*dx) 349 : "0" (*ax), "2" (*cx)); 350 351 *bx &= maskebx; 352 *cx &= maskecx; 353 *cx |= setecx; 354 *dx &= maskedx; 355 } 356 STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */ 357 358 static bool __init xen_check_mwait(void) 359 { 360 #ifdef CONFIG_ACPI 361 struct xen_platform_op op = { 362 .cmd = XENPF_set_processor_pminfo, 363 .u.set_pminfo.id = -1, 364 .u.set_pminfo.type = XEN_PM_PDC, 365 }; 366 uint32_t buf[3]; 367 unsigned int ax, bx, cx, dx; 368 unsigned int mwait_mask; 369 370 /* We need to determine whether it is OK to expose the MWAIT 371 * capability to the kernel to harvest deeper than C3 states from ACPI 372 * _CST using the processor_harvest_xen.c module. For this to work, we 373 * need to gather the MWAIT_LEAF values (which the cstate.c code 374 * checks against). The hypervisor won't expose the MWAIT flag because 375 * it would break backwards compatibility; so we will find out directly 376 * from the hardware and hypercall. 377 */ 378 if (!xen_initial_domain()) 379 return false; 380 381 /* 382 * When running under platform earlier than Xen4.2, do not expose 383 * mwait, to avoid the risk of loading native acpi pad driver 384 */ 385 if (!xen_running_on_version_or_later(4, 2)) 386 return false; 387 388 ax = 1; 389 cx = 0; 390 391 native_cpuid(&ax, &bx, &cx, &dx); 392 393 mwait_mask = (1 << (X86_FEATURE_EST % 32)) | 394 (1 << (X86_FEATURE_MWAIT % 32)); 395 396 if ((cx & mwait_mask) != mwait_mask) 397 return false; 398 399 /* We need to emulate the MWAIT_LEAF and for that we need both 400 * ecx and edx. The hypercall provides only partial information. 401 */ 402 403 ax = CPUID_MWAIT_LEAF; 404 bx = 0; 405 cx = 0; 406 dx = 0; 407 408 native_cpuid(&ax, &bx, &cx, &dx); 409 410 /* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so, 411 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3. 412 */ 413 buf[0] = ACPI_PDC_REVISION_ID; 414 buf[1] = 1; 415 buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP); 416 417 set_xen_guest_handle(op.u.set_pminfo.pdc, buf); 418 419 if ((HYPERVISOR_platform_op(&op) == 0) && 420 (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) { 421 cpuid_leaf5_ecx_val = cx; 422 cpuid_leaf5_edx_val = dx; 423 } 424 return true; 425 #else 426 return false; 427 #endif 428 } 429 static void __init xen_init_cpuid_mask(void) 430 { 431 unsigned int ax, bx, cx, dx; 432 unsigned int xsave_mask; 433 434 cpuid_leaf1_edx_mask = 435 ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */ 436 (1 << X86_FEATURE_ACC)); /* thermal monitoring */ 437 438 if (!xen_initial_domain()) 439 cpuid_leaf1_edx_mask &= 440 ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */ 441 442 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32)); 443 444 ax = 1; 445 cx = 0; 446 cpuid(1, &ax, &bx, &cx, &dx); 447 448 xsave_mask = 449 (1 << (X86_FEATURE_XSAVE % 32)) | 450 (1 << (X86_FEATURE_OSXSAVE % 32)); 451 452 /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ 453 if ((cx & xsave_mask) != xsave_mask) 454 cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ 455 if (xen_check_mwait()) 456 cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); 457 } 458 459 static void xen_set_debugreg(int reg, unsigned long val) 460 { 461 HYPERVISOR_set_debugreg(reg, val); 462 } 463 464 static unsigned long xen_get_debugreg(int reg) 465 { 466 return HYPERVISOR_get_debugreg(reg); 467 } 468 469 static void xen_end_context_switch(struct task_struct *next) 470 { 471 xen_mc_flush(); 472 paravirt_end_context_switch(next); 473 } 474 475 static unsigned long xen_store_tr(void) 476 { 477 return 0; 478 } 479 480 /* 481 * Set the page permissions for a particular virtual address. If the 482 * address is a vmalloc mapping (or other non-linear mapping), then 483 * find the linear mapping of the page and also set its protections to 484 * match. 485 */ 486 static void set_aliased_prot(void *v, pgprot_t prot) 487 { 488 int level; 489 pte_t *ptep; 490 pte_t pte; 491 unsigned long pfn; 492 struct page *page; 493 unsigned char dummy; 494 495 ptep = lookup_address((unsigned long)v, &level); 496 BUG_ON(ptep == NULL); 497 498 pfn = pte_pfn(*ptep); 499 page = pfn_to_page(pfn); 500 501 pte = pfn_pte(pfn, prot); 502 503 /* 504 * Careful: update_va_mapping() will fail if the virtual address 505 * we're poking isn't populated in the page tables. We don't 506 * need to worry about the direct map (that's always in the page 507 * tables), but we need to be careful about vmap space. In 508 * particular, the top level page table can lazily propagate 509 * entries between processes, so if we've switched mms since we 510 * vmapped the target in the first place, we might not have the 511 * top-level page table entry populated. 512 * 513 * We disable preemption because we want the same mm active when 514 * we probe the target and when we issue the hypercall. We'll 515 * have the same nominal mm, but if we're a kernel thread, lazy 516 * mm dropping could change our pgd. 517 * 518 * Out of an abundance of caution, this uses __get_user() to fault 519 * in the target address just in case there's some obscure case 520 * in which the target address isn't readable. 521 */ 522 523 preempt_disable(); 524 525 pagefault_disable(); /* Avoid warnings due to being atomic. */ 526 __get_user(dummy, (unsigned char __user __force *)v); 527 pagefault_enable(); 528 529 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) 530 BUG(); 531 532 if (!PageHighMem(page)) { 533 void *av = __va(PFN_PHYS(pfn)); 534 535 if (av != v) 536 if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0)) 537 BUG(); 538 } else 539 kmap_flush_unused(); 540 541 preempt_enable(); 542 } 543 544 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) 545 { 546 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 547 int i; 548 549 /* 550 * We need to mark the all aliases of the LDT pages RO. We 551 * don't need to call vm_flush_aliases(), though, since that's 552 * only responsible for flushing aliases out the TLBs, not the 553 * page tables, and Xen will flush the TLB for us if needed. 554 * 555 * To avoid confusing future readers: none of this is necessary 556 * to load the LDT. The hypervisor only checks this when the 557 * LDT is faulted in due to subsequent descriptor access. 558 */ 559 560 for(i = 0; i < entries; i += entries_per_page) 561 set_aliased_prot(ldt + i, PAGE_KERNEL_RO); 562 } 563 564 static void xen_free_ldt(struct desc_struct *ldt, unsigned entries) 565 { 566 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 567 int i; 568 569 for(i = 0; i < entries; i += entries_per_page) 570 set_aliased_prot(ldt + i, PAGE_KERNEL); 571 } 572 573 static void xen_set_ldt(const void *addr, unsigned entries) 574 { 575 struct mmuext_op *op; 576 struct multicall_space mcs = xen_mc_entry(sizeof(*op)); 577 578 trace_xen_cpu_set_ldt(addr, entries); 579 580 op = mcs.args; 581 op->cmd = MMUEXT_SET_LDT; 582 op->arg1.linear_addr = (unsigned long)addr; 583 op->arg2.nr_ents = entries; 584 585 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 586 587 xen_mc_issue(PARAVIRT_LAZY_CPU); 588 } 589 590 static void xen_load_gdt(const struct desc_ptr *dtr) 591 { 592 unsigned long va = dtr->address; 593 unsigned int size = dtr->size + 1; 594 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 595 unsigned long frames[pages]; 596 int f; 597 598 /* 599 * A GDT can be up to 64k in size, which corresponds to 8192 600 * 8-byte entries, or 16 4k pages.. 601 */ 602 603 BUG_ON(size > 65536); 604 BUG_ON(va & ~PAGE_MASK); 605 606 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 607 int level; 608 pte_t *ptep; 609 unsigned long pfn, mfn; 610 void *virt; 611 612 /* 613 * The GDT is per-cpu and is in the percpu data area. 614 * That can be virtually mapped, so we need to do a 615 * page-walk to get the underlying MFN for the 616 * hypercall. The page can also be in the kernel's 617 * linear range, so we need to RO that mapping too. 618 */ 619 ptep = lookup_address(va, &level); 620 BUG_ON(ptep == NULL); 621 622 pfn = pte_pfn(*ptep); 623 mfn = pfn_to_mfn(pfn); 624 virt = __va(PFN_PHYS(pfn)); 625 626 frames[f] = mfn; 627 628 make_lowmem_page_readonly((void *)va); 629 make_lowmem_page_readonly(virt); 630 } 631 632 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) 633 BUG(); 634 } 635 636 /* 637 * load_gdt for early boot, when the gdt is only mapped once 638 */ 639 static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) 640 { 641 unsigned long va = dtr->address; 642 unsigned int size = dtr->size + 1; 643 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 644 unsigned long frames[pages]; 645 int f; 646 647 /* 648 * A GDT can be up to 64k in size, which corresponds to 8192 649 * 8-byte entries, or 16 4k pages.. 650 */ 651 652 BUG_ON(size > 65536); 653 BUG_ON(va & ~PAGE_MASK); 654 655 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { 656 pte_t pte; 657 unsigned long pfn, mfn; 658 659 pfn = virt_to_pfn(va); 660 mfn = pfn_to_mfn(pfn); 661 662 pte = pfn_pte(pfn, PAGE_KERNEL_RO); 663 664 if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0)) 665 BUG(); 666 667 frames[f] = mfn; 668 } 669 670 if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) 671 BUG(); 672 } 673 674 static inline bool desc_equal(const struct desc_struct *d1, 675 const struct desc_struct *d2) 676 { 677 return d1->a == d2->a && d1->b == d2->b; 678 } 679 680 static void load_TLS_descriptor(struct thread_struct *t, 681 unsigned int cpu, unsigned int i) 682 { 683 struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i]; 684 struct desc_struct *gdt; 685 xmaddr_t maddr; 686 struct multicall_space mc; 687 688 if (desc_equal(shadow, &t->tls_array[i])) 689 return; 690 691 *shadow = t->tls_array[i]; 692 693 gdt = get_cpu_gdt_table(cpu); 694 maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); 695 mc = __xen_mc_entry(0); 696 697 MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]); 698 } 699 700 static void xen_load_tls(struct thread_struct *t, unsigned int cpu) 701 { 702 /* 703 * XXX sleazy hack: If we're being called in a lazy-cpu zone 704 * and lazy gs handling is enabled, it means we're in a 705 * context switch, and %gs has just been saved. This means we 706 * can zero it out to prevent faults on exit from the 707 * hypervisor if the next process has no %gs. Either way, it 708 * has been saved, and the new value will get loaded properly. 709 * This will go away as soon as Xen has been modified to not 710 * save/restore %gs for normal hypercalls. 711 * 712 * On x86_64, this hack is not used for %gs, because gs points 713 * to KERNEL_GS_BASE (and uses it for PDA references), so we 714 * must not zero %gs on x86_64 715 * 716 * For x86_64, we need to zero %fs, otherwise we may get an 717 * exception between the new %fs descriptor being loaded and 718 * %fs being effectively cleared at __switch_to(). 719 */ 720 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) { 721 #ifdef CONFIG_X86_32 722 lazy_load_gs(0); 723 #else 724 loadsegment(fs, 0); 725 #endif 726 } 727 728 xen_mc_batch(); 729 730 load_TLS_descriptor(t, cpu, 0); 731 load_TLS_descriptor(t, cpu, 1); 732 load_TLS_descriptor(t, cpu, 2); 733 734 xen_mc_issue(PARAVIRT_LAZY_CPU); 735 } 736 737 #ifdef CONFIG_X86_64 738 static void xen_load_gs_index(unsigned int idx) 739 { 740 if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx)) 741 BUG(); 742 } 743 #endif 744 745 static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, 746 const void *ptr) 747 { 748 xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]); 749 u64 entry = *(u64 *)ptr; 750 751 trace_xen_cpu_write_ldt_entry(dt, entrynum, entry); 752 753 preempt_disable(); 754 755 xen_mc_flush(); 756 if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry)) 757 BUG(); 758 759 preempt_enable(); 760 } 761 762 static int cvt_gate_to_trap(int vector, const gate_desc *val, 763 struct trap_info *info) 764 { 765 unsigned long addr; 766 767 if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT) 768 return 0; 769 770 info->vector = vector; 771 772 addr = gate_offset(*val); 773 #ifdef CONFIG_X86_64 774 /* 775 * Look for known traps using IST, and substitute them 776 * appropriately. The debugger ones are the only ones we care 777 * about. Xen will handle faults like double_fault, 778 * so we should never see them. Warn if 779 * there's an unexpected IST-using fault handler. 780 */ 781 if (addr == (unsigned long)debug) 782 addr = (unsigned long)xen_debug; 783 else if (addr == (unsigned long)int3) 784 addr = (unsigned long)xen_int3; 785 else if (addr == (unsigned long)stack_segment) 786 addr = (unsigned long)xen_stack_segment; 787 else if (addr == (unsigned long)double_fault) { 788 /* Don't need to handle these */ 789 return 0; 790 #ifdef CONFIG_X86_MCE 791 } else if (addr == (unsigned long)machine_check) { 792 /* 793 * when xen hypervisor inject vMCE to guest, 794 * use native mce handler to handle it 795 */ 796 ; 797 #endif 798 } else if (addr == (unsigned long)nmi) 799 /* 800 * Use the native version as well. 801 */ 802 ; 803 else { 804 /* Some other trap using IST? */ 805 if (WARN_ON(val->ist != 0)) 806 return 0; 807 } 808 #endif /* CONFIG_X86_64 */ 809 info->address = addr; 810 811 info->cs = gate_segment(*val); 812 info->flags = val->dpl; 813 /* interrupt gates clear IF */ 814 if (val->type == GATE_INTERRUPT) 815 info->flags |= 1 << 2; 816 817 return 1; 818 } 819 820 /* Locations of each CPU's IDT */ 821 static DEFINE_PER_CPU(struct desc_ptr, idt_desc); 822 823 /* Set an IDT entry. If the entry is part of the current IDT, then 824 also update Xen. */ 825 static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) 826 { 827 unsigned long p = (unsigned long)&dt[entrynum]; 828 unsigned long start, end; 829 830 trace_xen_cpu_write_idt_entry(dt, entrynum, g); 831 832 preempt_disable(); 833 834 start = __this_cpu_read(idt_desc.address); 835 end = start + __this_cpu_read(idt_desc.size) + 1; 836 837 xen_mc_flush(); 838 839 native_write_idt_entry(dt, entrynum, g); 840 841 if (p >= start && (p + 8) <= end) { 842 struct trap_info info[2]; 843 844 info[1].address = 0; 845 846 if (cvt_gate_to_trap(entrynum, g, &info[0])) 847 if (HYPERVISOR_set_trap_table(info)) 848 BUG(); 849 } 850 851 preempt_enable(); 852 } 853 854 static void xen_convert_trap_info(const struct desc_ptr *desc, 855 struct trap_info *traps) 856 { 857 unsigned in, out, count; 858 859 count = (desc->size+1) / sizeof(gate_desc); 860 BUG_ON(count > 256); 861 862 for (in = out = 0; in < count; in++) { 863 gate_desc *entry = (gate_desc*)(desc->address) + in; 864 865 if (cvt_gate_to_trap(in, entry, &traps[out])) 866 out++; 867 } 868 traps[out].address = 0; 869 } 870 871 void xen_copy_trap_info(struct trap_info *traps) 872 { 873 const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); 874 875 xen_convert_trap_info(desc, traps); 876 } 877 878 /* Load a new IDT into Xen. In principle this can be per-CPU, so we 879 hold a spinlock to protect the static traps[] array (static because 880 it avoids allocation, and saves stack space). */ 881 static void xen_load_idt(const struct desc_ptr *desc) 882 { 883 static DEFINE_SPINLOCK(lock); 884 static struct trap_info traps[257]; 885 886 trace_xen_cpu_load_idt(desc); 887 888 spin_lock(&lock); 889 890 memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); 891 892 xen_convert_trap_info(desc, traps); 893 894 xen_mc_flush(); 895 if (HYPERVISOR_set_trap_table(traps)) 896 BUG(); 897 898 spin_unlock(&lock); 899 } 900 901 /* Write a GDT descriptor entry. Ignore LDT descriptors, since 902 they're handled differently. */ 903 static void xen_write_gdt_entry(struct desc_struct *dt, int entry, 904 const void *desc, int type) 905 { 906 trace_xen_cpu_write_gdt_entry(dt, entry, desc, type); 907 908 preempt_disable(); 909 910 switch (type) { 911 case DESC_LDT: 912 case DESC_TSS: 913 /* ignore */ 914 break; 915 916 default: { 917 xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]); 918 919 xen_mc_flush(); 920 if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc)) 921 BUG(); 922 } 923 924 } 925 926 preempt_enable(); 927 } 928 929 /* 930 * Version of write_gdt_entry for use at early boot-time needed to 931 * update an entry as simply as possible. 932 */ 933 static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, 934 const void *desc, int type) 935 { 936 trace_xen_cpu_write_gdt_entry(dt, entry, desc, type); 937 938 switch (type) { 939 case DESC_LDT: 940 case DESC_TSS: 941 /* ignore */ 942 break; 943 944 default: { 945 xmaddr_t maddr = virt_to_machine(&dt[entry]); 946 947 if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc)) 948 dt[entry] = *(struct desc_struct *)desc; 949 } 950 951 } 952 } 953 954 static void xen_load_sp0(struct tss_struct *tss, 955 struct thread_struct *thread) 956 { 957 struct multicall_space mcs; 958 959 mcs = xen_mc_entry(0); 960 MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0); 961 xen_mc_issue(PARAVIRT_LAZY_CPU); 962 tss->x86_tss.sp0 = thread->sp0; 963 } 964 965 void xen_set_iopl_mask(unsigned mask) 966 { 967 struct physdev_set_iopl set_iopl; 968 969 /* Force the change at ring 0. */ 970 set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3; 971 HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); 972 } 973 974 static void xen_io_delay(void) 975 { 976 } 977 978 static void xen_clts(void) 979 { 980 struct multicall_space mcs; 981 982 mcs = xen_mc_entry(0); 983 984 MULTI_fpu_taskswitch(mcs.mc, 0); 985 986 xen_mc_issue(PARAVIRT_LAZY_CPU); 987 } 988 989 static DEFINE_PER_CPU(unsigned long, xen_cr0_value); 990 991 static unsigned long xen_read_cr0(void) 992 { 993 unsigned long cr0 = this_cpu_read(xen_cr0_value); 994 995 if (unlikely(cr0 == 0)) { 996 cr0 = native_read_cr0(); 997 this_cpu_write(xen_cr0_value, cr0); 998 } 999 1000 return cr0; 1001 } 1002 1003 static void xen_write_cr0(unsigned long cr0) 1004 { 1005 struct multicall_space mcs; 1006 1007 this_cpu_write(xen_cr0_value, cr0); 1008 1009 /* Only pay attention to cr0.TS; everything else is 1010 ignored. */ 1011 mcs = xen_mc_entry(0); 1012 1013 MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0); 1014 1015 xen_mc_issue(PARAVIRT_LAZY_CPU); 1016 } 1017 1018 static void xen_write_cr4(unsigned long cr4) 1019 { 1020 cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE); 1021 1022 native_write_cr4(cr4); 1023 } 1024 #ifdef CONFIG_X86_64 1025 static inline unsigned long xen_read_cr8(void) 1026 { 1027 return 0; 1028 } 1029 static inline void xen_write_cr8(unsigned long val) 1030 { 1031 BUG_ON(val); 1032 } 1033 #endif 1034 1035 static u64 xen_read_msr_safe(unsigned int msr, int *err) 1036 { 1037 u64 val; 1038 1039 if (pmu_msr_read(msr, &val, err)) 1040 return val; 1041 1042 val = native_read_msr_safe(msr, err); 1043 switch (msr) { 1044 case MSR_IA32_APICBASE: 1045 #ifdef CONFIG_X86_X2APIC 1046 if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) 1047 #endif 1048 val &= ~X2APIC_ENABLE; 1049 break; 1050 } 1051 return val; 1052 } 1053 1054 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) 1055 { 1056 int ret; 1057 1058 ret = 0; 1059 1060 switch (msr) { 1061 #ifdef CONFIG_X86_64 1062 unsigned which; 1063 u64 base; 1064 1065 case MSR_FS_BASE: which = SEGBASE_FS; goto set; 1066 case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set; 1067 case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set; 1068 1069 set: 1070 base = ((u64)high << 32) | low; 1071 if (HYPERVISOR_set_segment_base(which, base) != 0) 1072 ret = -EIO; 1073 break; 1074 #endif 1075 1076 case MSR_STAR: 1077 case MSR_CSTAR: 1078 case MSR_LSTAR: 1079 case MSR_SYSCALL_MASK: 1080 case MSR_IA32_SYSENTER_CS: 1081 case MSR_IA32_SYSENTER_ESP: 1082 case MSR_IA32_SYSENTER_EIP: 1083 /* Fast syscall setup is all done in hypercalls, so 1084 these are all ignored. Stub them out here to stop 1085 Xen console noise. */ 1086 break; 1087 1088 default: 1089 if (!pmu_msr_write(msr, low, high, &ret)) 1090 ret = native_write_msr_safe(msr, low, high); 1091 } 1092 1093 return ret; 1094 } 1095 1096 void xen_setup_shared_info(void) 1097 { 1098 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 1099 set_fixmap(FIX_PARAVIRT_BOOTMAP, 1100 xen_start_info->shared_info); 1101 1102 HYPERVISOR_shared_info = 1103 (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); 1104 } else 1105 HYPERVISOR_shared_info = 1106 (struct shared_info *)__va(xen_start_info->shared_info); 1107 1108 #ifndef CONFIG_SMP 1109 /* In UP this is as good a place as any to set up shared info */ 1110 xen_setup_vcpu_info_placement(); 1111 #endif 1112 1113 xen_setup_mfn_list_list(); 1114 } 1115 1116 /* This is called once we have the cpu_possible_mask */ 1117 void xen_setup_vcpu_info_placement(void) 1118 { 1119 int cpu; 1120 1121 for_each_possible_cpu(cpu) 1122 xen_vcpu_setup(cpu); 1123 1124 /* xen_vcpu_setup managed to place the vcpu_info within the 1125 * percpu area for all cpus, so make use of it. Note that for 1126 * PVH we want to use native IRQ mechanism. */ 1127 if (have_vcpu_info_placement && !xen_pvh_domain()) { 1128 pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct); 1129 pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct); 1130 pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct); 1131 pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct); 1132 pv_mmu_ops.read_cr2 = xen_read_cr2_direct; 1133 } 1134 } 1135 1136 static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf, 1137 unsigned long addr, unsigned len) 1138 { 1139 char *start, *end, *reloc; 1140 unsigned ret; 1141 1142 start = end = reloc = NULL; 1143 1144 #define SITE(op, x) \ 1145 case PARAVIRT_PATCH(op.x): \ 1146 if (have_vcpu_info_placement) { \ 1147 start = (char *)xen_##x##_direct; \ 1148 end = xen_##x##_direct_end; \ 1149 reloc = xen_##x##_direct_reloc; \ 1150 } \ 1151 goto patch_site 1152 1153 switch (type) { 1154 SITE(pv_irq_ops, irq_enable); 1155 SITE(pv_irq_ops, irq_disable); 1156 SITE(pv_irq_ops, save_fl); 1157 SITE(pv_irq_ops, restore_fl); 1158 #undef SITE 1159 1160 patch_site: 1161 if (start == NULL || (end-start) > len) 1162 goto default_patch; 1163 1164 ret = paravirt_patch_insns(insnbuf, len, start, end); 1165 1166 /* Note: because reloc is assigned from something that 1167 appears to be an array, gcc assumes it's non-null, 1168 but doesn't know its relationship with start and 1169 end. */ 1170 if (reloc > start && reloc < end) { 1171 int reloc_off = reloc - start; 1172 long *relocp = (long *)(insnbuf + reloc_off); 1173 long delta = start - (char *)addr; 1174 1175 *relocp += delta; 1176 } 1177 break; 1178 1179 default_patch: 1180 default: 1181 ret = paravirt_patch_default(type, clobbers, insnbuf, 1182 addr, len); 1183 break; 1184 } 1185 1186 return ret; 1187 } 1188 1189 static const struct pv_info xen_info __initconst = { 1190 .paravirt_enabled = 1, 1191 .shared_kernel_pmd = 0, 1192 1193 #ifdef CONFIG_X86_64 1194 .extra_user_64bit_cs = FLAT_USER_CS64, 1195 #endif 1196 .features = 0, 1197 .name = "Xen", 1198 }; 1199 1200 static const struct pv_init_ops xen_init_ops __initconst = { 1201 .patch = xen_patch, 1202 }; 1203 1204 static const struct pv_cpu_ops xen_cpu_ops __initconst = { 1205 .cpuid = xen_cpuid, 1206 1207 .set_debugreg = xen_set_debugreg, 1208 .get_debugreg = xen_get_debugreg, 1209 1210 .clts = xen_clts, 1211 1212 .read_cr0 = xen_read_cr0, 1213 .write_cr0 = xen_write_cr0, 1214 1215 .read_cr4 = native_read_cr4, 1216 .read_cr4_safe = native_read_cr4_safe, 1217 .write_cr4 = xen_write_cr4, 1218 1219 #ifdef CONFIG_X86_64 1220 .read_cr8 = xen_read_cr8, 1221 .write_cr8 = xen_write_cr8, 1222 #endif 1223 1224 .wbinvd = native_wbinvd, 1225 1226 .read_msr = xen_read_msr_safe, 1227 .write_msr = xen_write_msr_safe, 1228 1229 .read_pmc = xen_read_pmc, 1230 1231 .iret = xen_iret, 1232 #ifdef CONFIG_X86_64 1233 .usergs_sysret64 = xen_sysret64, 1234 #endif 1235 1236 .load_tr_desc = paravirt_nop, 1237 .set_ldt = xen_set_ldt, 1238 .load_gdt = xen_load_gdt, 1239 .load_idt = xen_load_idt, 1240 .load_tls = xen_load_tls, 1241 #ifdef CONFIG_X86_64 1242 .load_gs_index = xen_load_gs_index, 1243 #endif 1244 1245 .alloc_ldt = xen_alloc_ldt, 1246 .free_ldt = xen_free_ldt, 1247 1248 .store_idt = native_store_idt, 1249 .store_tr = xen_store_tr, 1250 1251 .write_ldt_entry = xen_write_ldt_entry, 1252 .write_gdt_entry = xen_write_gdt_entry, 1253 .write_idt_entry = xen_write_idt_entry, 1254 .load_sp0 = xen_load_sp0, 1255 1256 .set_iopl_mask = xen_set_iopl_mask, 1257 .io_delay = xen_io_delay, 1258 1259 /* Xen takes care of %gs when switching to usermode for us */ 1260 .swapgs = paravirt_nop, 1261 1262 .start_context_switch = paravirt_start_context_switch, 1263 .end_context_switch = xen_end_context_switch, 1264 }; 1265 1266 static void xen_reboot(int reason) 1267 { 1268 struct sched_shutdown r = { .reason = reason }; 1269 int cpu; 1270 1271 for_each_online_cpu(cpu) 1272 xen_pmu_finish(cpu); 1273 1274 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) 1275 BUG(); 1276 } 1277 1278 static void xen_restart(char *msg) 1279 { 1280 xen_reboot(SHUTDOWN_reboot); 1281 } 1282 1283 static void xen_emergency_restart(void) 1284 { 1285 xen_reboot(SHUTDOWN_reboot); 1286 } 1287 1288 static void xen_machine_halt(void) 1289 { 1290 xen_reboot(SHUTDOWN_poweroff); 1291 } 1292 1293 static void xen_machine_power_off(void) 1294 { 1295 if (pm_power_off) 1296 pm_power_off(); 1297 xen_reboot(SHUTDOWN_poweroff); 1298 } 1299 1300 static void xen_crash_shutdown(struct pt_regs *regs) 1301 { 1302 xen_reboot(SHUTDOWN_crash); 1303 } 1304 1305 static int 1306 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) 1307 { 1308 xen_reboot(SHUTDOWN_crash); 1309 return NOTIFY_DONE; 1310 } 1311 1312 static struct notifier_block xen_panic_block = { 1313 .notifier_call= xen_panic_event, 1314 .priority = INT_MIN 1315 }; 1316 1317 int xen_panic_handler_init(void) 1318 { 1319 atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); 1320 return 0; 1321 } 1322 1323 static const struct machine_ops xen_machine_ops __initconst = { 1324 .restart = xen_restart, 1325 .halt = xen_machine_halt, 1326 .power_off = xen_machine_power_off, 1327 .shutdown = xen_machine_halt, 1328 .crash_shutdown = xen_crash_shutdown, 1329 .emergency_restart = xen_emergency_restart, 1330 }; 1331 1332 static unsigned char xen_get_nmi_reason(void) 1333 { 1334 unsigned char reason = 0; 1335 1336 /* Construct a value which looks like it came from port 0x61. */ 1337 if (test_bit(_XEN_NMIREASON_io_error, 1338 &HYPERVISOR_shared_info->arch.nmi_reason)) 1339 reason |= NMI_REASON_IOCHK; 1340 if (test_bit(_XEN_NMIREASON_pci_serr, 1341 &HYPERVISOR_shared_info->arch.nmi_reason)) 1342 reason |= NMI_REASON_SERR; 1343 1344 return reason; 1345 } 1346 1347 static void __init xen_boot_params_init_edd(void) 1348 { 1349 #if IS_ENABLED(CONFIG_EDD) 1350 struct xen_platform_op op; 1351 struct edd_info *edd_info; 1352 u32 *mbr_signature; 1353 unsigned nr; 1354 int ret; 1355 1356 edd_info = boot_params.eddbuf; 1357 mbr_signature = boot_params.edd_mbr_sig_buffer; 1358 1359 op.cmd = XENPF_firmware_info; 1360 1361 op.u.firmware_info.type = XEN_FW_DISK_INFO; 1362 for (nr = 0; nr < EDDMAXNR; nr++) { 1363 struct edd_info *info = edd_info + nr; 1364 1365 op.u.firmware_info.index = nr; 1366 info->params.length = sizeof(info->params); 1367 set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params, 1368 &info->params); 1369 ret = HYPERVISOR_platform_op(&op); 1370 if (ret) 1371 break; 1372 1373 #define C(x) info->x = op.u.firmware_info.u.disk_info.x 1374 C(device); 1375 C(version); 1376 C(interface_support); 1377 C(legacy_max_cylinder); 1378 C(legacy_max_head); 1379 C(legacy_sectors_per_track); 1380 #undef C 1381 } 1382 boot_params.eddbuf_entries = nr; 1383 1384 op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE; 1385 for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) { 1386 op.u.firmware_info.index = nr; 1387 ret = HYPERVISOR_platform_op(&op); 1388 if (ret) 1389 break; 1390 mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature; 1391 } 1392 boot_params.edd_mbr_sig_buf_entries = nr; 1393 #endif 1394 } 1395 1396 /* 1397 * Set up the GDT and segment registers for -fstack-protector. Until 1398 * we do this, we have to be careful not to call any stack-protected 1399 * function, which is most of the kernel. 1400 * 1401 * Note, that it is __ref because the only caller of this after init 1402 * is PVH which is not going to use xen_load_gdt_boot or other 1403 * __init functions. 1404 */ 1405 static void __ref xen_setup_gdt(int cpu) 1406 { 1407 if (xen_feature(XENFEAT_auto_translated_physmap)) { 1408 #ifdef CONFIG_X86_64 1409 unsigned long dummy; 1410 1411 load_percpu_segment(cpu); /* We need to access per-cpu area */ 1412 switch_to_new_gdt(cpu); /* GDT and GS set */ 1413 1414 /* We are switching of the Xen provided GDT to our HVM mode 1415 * GDT. The new GDT has __KERNEL_CS with CS.L = 1 1416 * and we are jumping to reload it. 1417 */ 1418 asm volatile ("pushq %0\n" 1419 "leaq 1f(%%rip),%0\n" 1420 "pushq %0\n" 1421 "lretq\n" 1422 "1:\n" 1423 : "=&r" (dummy) : "0" (__KERNEL_CS)); 1424 1425 /* 1426 * While not needed, we also set the %es, %ds, and %fs 1427 * to zero. We don't care about %ss as it is NULL. 1428 * Strictly speaking this is not needed as Xen zeros those 1429 * out (and also MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE) 1430 * 1431 * Linux zeros them in cpu_init() and in secondary_startup_64 1432 * (for BSP). 1433 */ 1434 loadsegment(es, 0); 1435 loadsegment(ds, 0); 1436 loadsegment(fs, 0); 1437 #else 1438 /* PVH: TODO Implement. */ 1439 BUG(); 1440 #endif 1441 return; /* PVH does not need any PV GDT ops. */ 1442 } 1443 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot; 1444 pv_cpu_ops.load_gdt = xen_load_gdt_boot; 1445 1446 setup_stack_canary_segment(0); 1447 switch_to_new_gdt(0); 1448 1449 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry; 1450 pv_cpu_ops.load_gdt = xen_load_gdt; 1451 } 1452 1453 #ifdef CONFIG_XEN_PVH 1454 /* 1455 * A PV guest starts with default flags that are not set for PVH, set them 1456 * here asap. 1457 */ 1458 static void xen_pvh_set_cr_flags(int cpu) 1459 { 1460 1461 /* Some of these are setup in 'secondary_startup_64'. The others: 1462 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests 1463 * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */ 1464 write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM); 1465 1466 if (!cpu) 1467 return; 1468 /* 1469 * For BSP, PSE PGE are set in probe_page_size_mask(), for APs 1470 * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu(). 1471 */ 1472 if (cpu_has_pse) 1473 cr4_set_bits_and_update_boot(X86_CR4_PSE); 1474 1475 if (cpu_has_pge) 1476 cr4_set_bits_and_update_boot(X86_CR4_PGE); 1477 } 1478 1479 /* 1480 * Note, that it is ref - because the only caller of this after init 1481 * is PVH which is not going to use xen_load_gdt_boot or other 1482 * __init functions. 1483 */ 1484 void __ref xen_pvh_secondary_vcpu_init(int cpu) 1485 { 1486 xen_setup_gdt(cpu); 1487 xen_pvh_set_cr_flags(cpu); 1488 } 1489 1490 static void __init xen_pvh_early_guest_init(void) 1491 { 1492 if (!xen_feature(XENFEAT_auto_translated_physmap)) 1493 return; 1494 1495 if (!xen_feature(XENFEAT_hvm_callback_vector)) 1496 return; 1497 1498 xen_have_vector_callback = 1; 1499 1500 xen_pvh_early_cpu_init(0, false); 1501 xen_pvh_set_cr_flags(0); 1502 1503 #ifdef CONFIG_X86_32 1504 BUG(); /* PVH: Implement proper support. */ 1505 #endif 1506 } 1507 #endif /* CONFIG_XEN_PVH */ 1508 1509 /* First C function to be called on Xen boot */ 1510 asmlinkage __visible void __init xen_start_kernel(void) 1511 { 1512 struct physdev_set_iopl set_iopl; 1513 unsigned long initrd_start = 0; 1514 u64 pat; 1515 int rc; 1516 1517 if (!xen_start_info) 1518 return; 1519 1520 xen_domain_type = XEN_PV_DOMAIN; 1521 1522 xen_setup_features(); 1523 #ifdef CONFIG_XEN_PVH 1524 xen_pvh_early_guest_init(); 1525 #endif 1526 xen_setup_machphys_mapping(); 1527 1528 /* Install Xen paravirt ops */ 1529 pv_info = xen_info; 1530 if (xen_initial_domain()) 1531 pv_info.features |= PV_SUPPORTED_RTC; 1532 pv_init_ops = xen_init_ops; 1533 if (!xen_pvh_domain()) { 1534 pv_cpu_ops = xen_cpu_ops; 1535 1536 x86_platform.get_nmi_reason = xen_get_nmi_reason; 1537 } 1538 1539 if (xen_feature(XENFEAT_auto_translated_physmap)) 1540 x86_init.resources.memory_setup = xen_auto_xlated_memory_setup; 1541 else 1542 x86_init.resources.memory_setup = xen_memory_setup; 1543 x86_init.oem.arch_setup = xen_arch_setup; 1544 x86_init.oem.banner = xen_banner; 1545 1546 xen_init_time_ops(); 1547 1548 /* 1549 * Set up some pagetable state before starting to set any ptes. 1550 */ 1551 1552 xen_init_mmu_ops(); 1553 1554 /* Prevent unwanted bits from being set in PTEs. */ 1555 __supported_pte_mask &= ~_PAGE_GLOBAL; 1556 1557 /* 1558 * Prevent page tables from being allocated in highmem, even 1559 * if CONFIG_HIGHPTE is enabled. 1560 */ 1561 __userpte_alloc_gfp &= ~__GFP_HIGHMEM; 1562 1563 /* Work out if we support NX */ 1564 x86_configure_nx(); 1565 1566 /* Get mfn list */ 1567 xen_build_dynamic_phys_to_machine(); 1568 1569 /* 1570 * Set up kernel GDT and segment registers, mainly so that 1571 * -fstack-protector code can be executed. 1572 */ 1573 xen_setup_gdt(0); 1574 1575 xen_init_irq_ops(); 1576 xen_init_cpuid_mask(); 1577 1578 #ifdef CONFIG_X86_LOCAL_APIC 1579 /* 1580 * set up the basic apic ops. 1581 */ 1582 xen_init_apic(); 1583 #endif 1584 1585 if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { 1586 pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; 1587 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; 1588 } 1589 1590 machine_ops = xen_machine_ops; 1591 1592 /* 1593 * The only reliable way to retain the initial address of the 1594 * percpu gdt_page is to remember it here, so we can go and 1595 * mark it RW later, when the initial percpu area is freed. 1596 */ 1597 xen_initial_gdt = &per_cpu(gdt_page, 0); 1598 1599 xen_smp_init(); 1600 1601 #ifdef CONFIG_ACPI_NUMA 1602 /* 1603 * The pages we from Xen are not related to machine pages, so 1604 * any NUMA information the kernel tries to get from ACPI will 1605 * be meaningless. Prevent it from trying. 1606 */ 1607 acpi_numa = -1; 1608 #endif 1609 /* Don't do the full vcpu_info placement stuff until we have a 1610 possible map and a non-dummy shared_info. */ 1611 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 1612 1613 local_irq_disable(); 1614 early_boot_irqs_disabled = true; 1615 1616 xen_raw_console_write("mapping kernel into physical memory\n"); 1617 xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, 1618 xen_start_info->nr_pages); 1619 xen_reserve_special_pages(); 1620 1621 /* 1622 * Modify the cache mode translation tables to match Xen's PAT 1623 * configuration. 1624 */ 1625 rdmsrl(MSR_IA32_CR_PAT, pat); 1626 pat_init_cache_modes(pat); 1627 1628 /* keep using Xen gdt for now; no urgent need to change it */ 1629 1630 #ifdef CONFIG_X86_32 1631 pv_info.kernel_rpl = 1; 1632 if (xen_feature(XENFEAT_supervisor_mode_kernel)) 1633 pv_info.kernel_rpl = 0; 1634 #else 1635 pv_info.kernel_rpl = 0; 1636 #endif 1637 /* set the limit of our address space */ 1638 xen_reserve_top(); 1639 1640 /* PVH: runs at default kernel iopl of 0 */ 1641 if (!xen_pvh_domain()) { 1642 /* 1643 * We used to do this in xen_arch_setup, but that is too late 1644 * on AMD were early_cpu_init (run before ->arch_setup()) calls 1645 * early_amd_init which pokes 0xcf8 port. 1646 */ 1647 set_iopl.iopl = 1; 1648 rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); 1649 if (rc != 0) 1650 xen_raw_printk("physdev_op failed %d\n", rc); 1651 } 1652 1653 #ifdef CONFIG_X86_32 1654 /* set up basic CPUID stuff */ 1655 cpu_detect(&new_cpu_data); 1656 set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU); 1657 new_cpu_data.wp_works_ok = 1; 1658 new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1); 1659 #endif 1660 1661 if (xen_start_info->mod_start) { 1662 if (xen_start_info->flags & SIF_MOD_START_PFN) 1663 initrd_start = PFN_PHYS(xen_start_info->mod_start); 1664 else 1665 initrd_start = __pa(xen_start_info->mod_start); 1666 } 1667 1668 /* Poke various useful things into boot_params */ 1669 boot_params.hdr.type_of_loader = (9 << 4) | 0; 1670 boot_params.hdr.ramdisk_image = initrd_start; 1671 boot_params.hdr.ramdisk_size = xen_start_info->mod_len; 1672 boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line); 1673 1674 if (!xen_initial_domain()) { 1675 add_preferred_console("xenboot", 0, NULL); 1676 add_preferred_console("tty", 0, NULL); 1677 add_preferred_console("hvc", 0, NULL); 1678 if (pci_xen) 1679 x86_init.pci.arch_init = pci_xen_init; 1680 } else { 1681 const struct dom0_vga_console_info *info = 1682 (void *)((char *)xen_start_info + 1683 xen_start_info->console.dom0.info_off); 1684 struct xen_platform_op op = { 1685 .cmd = XENPF_firmware_info, 1686 .interface_version = XENPF_INTERFACE_VERSION, 1687 .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS, 1688 }; 1689 1690 xen_init_vga(info, xen_start_info->console.dom0.info_size); 1691 xen_start_info->console.domU.mfn = 0; 1692 xen_start_info->console.domU.evtchn = 0; 1693 1694 if (HYPERVISOR_platform_op(&op) == 0) 1695 boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags; 1696 1697 /* Make sure ACS will be enabled */ 1698 pci_request_acs(); 1699 1700 xen_acpi_sleep_register(); 1701 1702 /* Avoid searching for BIOS MP tables */ 1703 x86_init.mpparse.find_smp_config = x86_init_noop; 1704 x86_init.mpparse.get_smp_config = x86_init_uint_noop; 1705 1706 xen_boot_params_init_edd(); 1707 } 1708 #ifdef CONFIG_PCI 1709 /* PCI BIOS service won't work from a PV guest. */ 1710 pci_probe &= ~PCI_PROBE_BIOS; 1711 #endif 1712 xen_raw_console_write("about to get started...\n"); 1713 1714 xen_setup_runstate_info(0); 1715 1716 xen_efi_init(); 1717 1718 /* Start the world */ 1719 #ifdef CONFIG_X86_32 1720 i386_start_kernel(); 1721 #else 1722 cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ 1723 x86_64_start_reservations((char *)__pa_symbol(&boot_params)); 1724 #endif 1725 } 1726 1727 void __ref xen_hvm_init_shared_info(void) 1728 { 1729 int cpu; 1730 struct xen_add_to_physmap xatp; 1731 static struct shared_info *shared_info_page = 0; 1732 1733 if (!shared_info_page) 1734 shared_info_page = (struct shared_info *) 1735 extend_brk(PAGE_SIZE, PAGE_SIZE); 1736 xatp.domid = DOMID_SELF; 1737 xatp.idx = 0; 1738 xatp.space = XENMAPSPACE_shared_info; 1739 xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; 1740 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 1741 BUG(); 1742 1743 HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; 1744 1745 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info 1746 * page, we use it in the event channel upcall and in some pvclock 1747 * related functions. We don't need the vcpu_info placement 1748 * optimizations because we don't use any pv_mmu or pv_irq op on 1749 * HVM. 1750 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is 1751 * online but xen_hvm_init_shared_info is run at resume time too and 1752 * in that case multiple vcpus might be online. */ 1753 for_each_online_cpu(cpu) { 1754 /* Leave it to be NULL. */ 1755 if (cpu >= MAX_VIRT_CPUS) 1756 continue; 1757 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1758 } 1759 } 1760 1761 #ifdef CONFIG_XEN_PVHVM 1762 static void __init init_hvm_pv_info(void) 1763 { 1764 int major, minor; 1765 uint32_t eax, ebx, ecx, edx, pages, msr, base; 1766 u64 pfn; 1767 1768 base = xen_cpuid_base(); 1769 cpuid(base + 1, &eax, &ebx, &ecx, &edx); 1770 1771 major = eax >> 16; 1772 minor = eax & 0xffff; 1773 printk(KERN_INFO "Xen version %d.%d.\n", major, minor); 1774 1775 cpuid(base + 2, &pages, &msr, &ecx, &edx); 1776 1777 pfn = __pa(hypercall_page); 1778 wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); 1779 1780 xen_setup_features(); 1781 1782 pv_info.name = "Xen HVM"; 1783 1784 xen_domain_type = XEN_HVM_DOMAIN; 1785 } 1786 1787 static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action, 1788 void *hcpu) 1789 { 1790 int cpu = (long)hcpu; 1791 switch (action) { 1792 case CPU_UP_PREPARE: 1793 xen_vcpu_setup(cpu); 1794 if (xen_have_vector_callback) { 1795 if (xen_feature(XENFEAT_hvm_safe_pvclock)) 1796 xen_setup_timer(cpu); 1797 } 1798 break; 1799 default: 1800 break; 1801 } 1802 return NOTIFY_OK; 1803 } 1804 1805 static struct notifier_block xen_hvm_cpu_notifier = { 1806 .notifier_call = xen_hvm_cpu_notify, 1807 }; 1808 1809 #ifdef CONFIG_KEXEC_CORE 1810 static void xen_hvm_shutdown(void) 1811 { 1812 native_machine_shutdown(); 1813 if (kexec_in_progress) 1814 xen_reboot(SHUTDOWN_soft_reset); 1815 } 1816 1817 static void xen_hvm_crash_shutdown(struct pt_regs *regs) 1818 { 1819 native_machine_crash_shutdown(regs); 1820 xen_reboot(SHUTDOWN_soft_reset); 1821 } 1822 #endif 1823 1824 static void __init xen_hvm_guest_init(void) 1825 { 1826 if (xen_pv_domain()) 1827 return; 1828 1829 init_hvm_pv_info(); 1830 1831 xen_hvm_init_shared_info(); 1832 1833 xen_panic_handler_init(); 1834 1835 if (xen_feature(XENFEAT_hvm_callback_vector)) 1836 xen_have_vector_callback = 1; 1837 xen_hvm_smp_init(); 1838 register_cpu_notifier(&xen_hvm_cpu_notifier); 1839 xen_unplug_emulated_devices(); 1840 x86_init.irqs.intr_init = xen_init_IRQ; 1841 xen_hvm_init_time_ops(); 1842 xen_hvm_init_mmu_ops(); 1843 #ifdef CONFIG_KEXEC_CORE 1844 machine_ops.shutdown = xen_hvm_shutdown; 1845 machine_ops.crash_shutdown = xen_hvm_crash_shutdown; 1846 #endif 1847 } 1848 #endif 1849 1850 static bool xen_nopv = false; 1851 static __init int xen_parse_nopv(char *arg) 1852 { 1853 xen_nopv = true; 1854 return 0; 1855 } 1856 early_param("xen_nopv", xen_parse_nopv); 1857 1858 static uint32_t __init xen_platform(void) 1859 { 1860 if (xen_nopv) 1861 return 0; 1862 1863 return xen_cpuid_base(); 1864 } 1865 1866 bool xen_hvm_need_lapic(void) 1867 { 1868 if (xen_nopv) 1869 return false; 1870 if (xen_pv_domain()) 1871 return false; 1872 if (!xen_hvm_domain()) 1873 return false; 1874 if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback) 1875 return false; 1876 return true; 1877 } 1878 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); 1879 1880 static void xen_set_cpu_features(struct cpuinfo_x86 *c) 1881 { 1882 if (xen_pv_domain()) { 1883 clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); 1884 set_cpu_cap(c, X86_FEATURE_XENPV); 1885 } 1886 } 1887 1888 const struct hypervisor_x86 x86_hyper_xen = { 1889 .name = "Xen", 1890 .detect = xen_platform, 1891 #ifdef CONFIG_XEN_PVHVM 1892 .init_platform = xen_hvm_guest_init, 1893 #endif 1894 .x2apic_available = xen_x2apic_para_available, 1895 .set_cpu_features = xen_set_cpu_features, 1896 }; 1897 EXPORT_SYMBOL(x86_hyper_xen); 1898 1899 #ifdef CONFIG_HOTPLUG_CPU 1900 void xen_arch_register_cpu(int num) 1901 { 1902 arch_register_cpu(num); 1903 } 1904 EXPORT_SYMBOL(xen_arch_register_cpu); 1905 1906 void xen_arch_unregister_cpu(int num) 1907 { 1908 arch_unregister_cpu(num); 1909 } 1910 EXPORT_SYMBOL(xen_arch_unregister_cpu); 1911 #endif 1912