/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/nmi.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "pmu.h"

cpumask_var_t xen_cpu_initialized_map;

struct xen_common_irq {
	int irq;
	char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

static void cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
	if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
		xen_enable_sysenter();
		xen_enable_syscall();
	}
	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	cpu_set_state_online(cpu); /* Implies full memory barrier. */

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();
}
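
/*
 * First C code run by a secondary VCPU.  cpu_initialize_context()
 * points the new VCPU's instruction pointer here, so a freshly started
 * VCPU brings itself up and then drops into the idle loop.
 */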
asmlinkage __visible void cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
	if (xen_hvm_domain())
		return;

	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
		per_cpu(xen_irq_work, cpu).irq = -1;
		kfree(per_cpu(xen_irq_work, cpu).name);
		per_cpu(xen_irq_work, cpu).name = NULL;
	}

	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
		per_cpu(xen_pmu_irq, cpu).irq = -1;
		kfree(per_cpu(xen_pmu_irq, cpu).name);
		per_cpu(xen_pmu_irq, cpu).name = NULL;
	}
}
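
/*
 * Bind the per-VCPU interrupt sources for @cpu: the resched,
 * call-function and call-function-single IPIs and the debug VIRQ for
 * all domain types, plus the irq_work IPI and the PMU VIRQ for PV
 * guests.  Each binding allocates a Xen event channel and hooks it up
 * to a Linux irq; on failure everything bound so far is torn down
 * again via xen_smp_intr_free().
 */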
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name, *pmu_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses the
	 * IPI mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu).irq = rc;
	per_cpu(xen_irq_work, cpu).name = callfunc_name;

	if (is_xen_pmu(cpu)) {
		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
					     xen_pmu_irq_handler,
					     IRQF_PERCPU|IRQF_NOBALANCING,
					     pmu_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_pmu_irq, cpu).irq = rc;
		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
	}

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * This is akin to using 'nr_cpus' on the Linux command line: with
	 * 'dom0_max_vcpus=X' the domain can only have up to X VCPUs, while
	 * nr_cpu_ids may be greater than X.  That is normally not a problem,
	 * except when CPU hotplugging is involved and the guest could end up
	 * with more than X CPUs - which cannot work, as there is no
	 * hypercall to raise the maximum number of VCPUs of an already
	 * running guest.  So cap nr_cpu_ids at X.
	 */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	if (xen_pv_domain()) {
		if (!xen_feature(XENFEAT_writable_page_tables))
			/*
			 * We've switched to the "real" per-cpu gdt, so make
			 * sure the old memory can be recycled.
			 */
			make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
		/*
		 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
		 * expects __USER_DS
		 */
		loadsegment(ds, __USER_DS);
		loadsegment(es, __USER_DS);
#endif

		xen_filter_cpu_maps();
		xen_setup_vcpu_info_placement();
	}

	/*
	 * Setup vcpu_info for boot CPU.
	 */
	if (xen_hvm_domain())
		xen_vcpu_setup(0);

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the smp bootup code is activated.  Hence we need to set this up
	 * before the core kernel is patched.  Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}
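
/*
 * PV counterpart of native_smp_prepare_cpus().  Besides setting up the
 * boot CPU's spinlock, PMU and IPI plumbing, this trims the possible
 * map down to max_cpus and then marks every remaining possible CPU as
 * present: bringing a VCPU up later only takes a hypercall, so there is
 * no further enumeration step to wait for.
 */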
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	xen_pmu_init(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}

static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* used to tell cpu_init() that it can proceed with initialization */
	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents = GDT_ENTRIES;

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs = __KERNEL_CS;
	ctxt->failsafe_callback_cs = __KERNEL_CS;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->event_callback_eip =
		(unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;
	ctxt->user_regs.cs = __KERNEL_CS;
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
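
/*
 * Bring a secondary VCPU online: register its initial register state
 * and page tables via cpu_initialize_context(), start it with
 * VCPUOP_up, then yield to the hypervisor until the new CPU has
 * reported itself CPU_ONLINE.
 */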
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	common_cpu_up(cpu, idle);

	xen_setup_runstate_info(cpu);

	/*
	 * PV VCPUs are always successfully taken down (see 'while' loop
	 * in xen_cpu_die()), so -EBUSY is an error.
	 */
	rc = cpu_check_up_prepare(cpu);
	if (rc)
		return rc;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	xen_pmu_init(cpu);

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
	BUG_ON(rc);

	while (cpu_report_state(cpu) != CPU_ONLINE)
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up,
						     xen_vcpu_nr(cpu), NULL)) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/10);
	}

	if (common_cpu_die(cpu) == 0) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
		xen_teardown_timer(cpu);
		xen_pmu_finish(cpu);
	}
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
	cpu_bringup();
	/*
	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
	 * clears certain data that the cpu_idle loop (which called us
	 * and that we return from) expects.  The only way to get that
	 * data back is to call:
	 */
	tick_nohz_idle_enter();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
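
/*
 * There is no (virtual) local APIC to program; an "IPI" is simply a
 * notification on the destination CPU's per-vector event channel,
 * sent with xen_send_IPI_one() for each online CPU in the mask.
 */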
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
		       vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
}
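
/*
 * HVM guests keep the native SMP bringup path; only the bits that are
 * still Xen-specific (event-channel based IPIs, the CPU 0 spinlock IRQ,
 * vcpu_info setup and the PV-aware cpu_die) are layered on top by
 * xen_hvm_smp_init() below.
 */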
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}

void __init xen_hvm_smp_init(void)
{
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_die = xen_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}