/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops. SMP under Xen is
 * very straightforward. Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of. As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

/*
 * Initialised to -1 so that the error path in xen_smp_intr_init() only
 * unbinds handlers that were actually bound (0 is a valid IRQ number).
 */
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfunc_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq) = -1;
static DEFINE_PER_CPU(int, xen_irq_work) = -1;
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

static void __cpuinit cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	this_cpu_write(cpu_state, CPU_ONLINE);

	wmb();
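
	/*
	 * xen_cpu_up() on the boot CPU spins until it sees cpu_state ==
	 * CPU_ONLINE, so the barrier above makes sure that store is
	 * visible before we go on.
	 */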

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}

static void __cpuinit cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_startup_entry(CPUHP_ONLINE);
}

static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses
	 * the IPI mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu) = rc;

	return 0;

fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);
	if (xen_hvm_domain())
		return rc;

	if (per_cpu(xen_irq_work, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

	return rc;
}

static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}
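
/*
 * Dom0 counterpart to xen_fill_possible_map(): with dom0_max_vcpus=X the
 * hypervisor brings up only X VCPUs, so recount the ones that are really
 * up and drop the rest from the possible and present maps.
 */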

static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line,
	 * which is OK because with 'dom0_max_vcpus=X' we can only have
	 * up to X VCPUs while nr_cpu_ids may be greater than X. This
	 * is normally not a problem, except when CPU hotplugging is
	 * involved: then there might be more than X CPUs in the guest,
	 * which will not work as there is no hypercall to raise the
	 * maximum number of VCPUs an already-running guest has. So
	 * cap it at X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	 * old memory can be recycled. */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();
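
	/*
	 * xen_cpu_initialized_map tracks which CPUs have already had
	 * their initial context loaded into the hypervisor; the boot
	 * CPU is already running, so it starts as the map's only member.
	 */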
	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}

static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	{
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __USER_DS;
		ctxt->user_regs.es = __USER_DS;

		xen_copy_trap_info(ctxt->trap_ctxt);

		ctxt->ldt_ents = 0;

		BUG_ON((unsigned long)gdt & ~PAGE_MASK);

		gdt_mfn = arbitrary_virt_to_mfn(gdt);
		make_lowmem_page_readonly(gdt);
		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

		ctxt->gdt_frames[0] = gdt_mfn;
		ctxt->gdt_ents = GDT_ENTRIES;

		ctxt->kernel_ss = __KERNEL_DS;
		ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
		ctxt->event_callback_cs = __KERNEL_CS;
		ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif
		ctxt->event_callback_eip =
			(unsigned long)xen_hypervisor_callback;
		ctxt->failsafe_callback_eip =
			(unsigned long)xen_failsafe_callback;
	}
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
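
/*
 * Bring a secondary VCPU online: hand its initial register state to the
 * hypervisor via VCPUOP_initialise, kick it with VCPUOP_up, then wait
 * for cpu_bringup() on the new CPU to report CPU_ONLINE.
 */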
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		/* Just in case we booted with a single CPU. */
		alternatives_enable_smp();

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	if (!xen_hvm_domain())
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif

static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
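
/* Deliver @vector to every online CPU in @mask, one event-channel IPI each. */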
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (num_online_cpus() <= 1 || xen_vector < 0)
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}
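
/*
 * PVHVM variants: CPU bringup itself takes the native path; only the
 * IPI event channels and the spinlock state need Xen-specific setup.
 */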
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;
	rc = native_cpu_up(cpu, tidle);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
	xen_cpu_die(cpu);
	native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}