/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

struct xen_common_irq {
        int irq;
        char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static void cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        this_cpu_write(cpu_state, CPU_ONLINE);

        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}
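
/*
 * First code run by a freshly started secondary VCPU:
 * cpu_initialize_context() below points the new VCPU's user_regs.eip
 * here, so the VCPU brings itself up and then drops into the idle loop.
 */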

static void cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_startup_entry(CPUHP_ONLINE);
}

static void xen_smp_intr_free(unsigned int cpu)
{
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
                kfree(per_cpu(xen_resched_irq, cpu).name);
                per_cpu(xen_resched_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
                per_cpu(xen_callfunc_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfunc_irq, cpu).name);
                per_cpu(xen_callfunc_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
                per_cpu(xen_debug_irq, cpu).irq = -1;
                kfree(per_cpu(xen_debug_irq, cpu).name);
                per_cpu(xen_debug_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
                per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        }
        if (xen_hvm_domain())
                return;

        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
                per_cpu(xen_irq_work, cpu).name = NULL;
        }
}

static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu).irq = rc;
        per_cpu(xen_resched_irq, cpu).name = resched_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu).irq = rc;
        per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu).irq = rc;
        per_cpu(xen_debug_irq, cpu).name = debug_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

        /*
         * The IRQ worker on PVHVM goes through the native path and uses
         * the IPI mechanism.
         */
        if (xen_hvm_domain())
                return 0;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu).irq = rc;
        per_cpu(xen_irq_work, cpu).name = callfunc_name;

        return 0;

fail:
        xen_smp_intr_free(cpu);
        return rc;
}
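
/*
 * Both map-building helpers below probe the hypervisor for every
 * potential VCPU.  VCPUOP_is_up reports a VCPU's run state, but it
 * fails for a VCPU the domain was never configured with, so a
 * non-negative return is used purely as an existence test:
 *
 *      rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 *      if (rc >= 0)    /* VCPU i exists, whether or not it is up */
 *              ...
 */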

static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /*
         * This is akin to using 'nr_cpus' on the Linux command line,
         * which is fine: with 'dom0_max_vcpus=X' we can have at most X
         * VCPUs, even though nr_cpu_ids may be greater than X.  That is
         * normally not a problem, except when CPU hotplug is involved
         * and the guest could be asked for more than X CPUs - which
         * cannot work, as there is no hypercall to expand the maximum
         * number of VCPUs of an already running guest.  So cap it at X.
         */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /* We've switched to the "real" per-cpu gdt, so make sure the
         * old memory can be recycled. */
        make_lowmem_page_readwrite(xen_initial_gdt);

        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}
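
/*
 * Build the initial register, descriptor and pagetable state for a new
 * VCPU and load it into the hypervisor.  VCPUOP_initialise only stores
 * the context; the VCPU is actually set running later, by VCPUOP_up in
 * xen_cpu_up().
 */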

static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        {
                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
                ctxt->user_regs.ds = __USER_DS;
                ctxt->user_regs.es = __USER_DS;

                xen_copy_trap_info(ctxt->trap_ctxt);

                ctxt->ldt_ents = 0;

                BUG_ON((unsigned long)gdt & ~PAGE_MASK);

                gdt_mfn = arbitrary_virt_to_mfn(gdt);
                make_lowmem_page_readonly(gdt);
                make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

                ctxt->gdt_frames[0] = gdt_mfn;
                ctxt->gdt_ents      = GDT_ENTRIES;

                ctxt->kernel_ss = __KERNEL_DS;
                ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
                ctxt->event_callback_cs     = __KERNEL_CS;
                ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
                ctxt->event_callback_eip    =
                                        (unsigned long)xen_hypervisor_callback;
                ctxt->failsafe_callback_eip =
                                        (unsigned long)xen_failsafe_callback;
        }
        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                /* Just in case we booted with a single CPU. */
                alternatives_enable_smp();

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}
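
/*
 * Nothing left to do: each VCPU was fully brought up in xen_cpu_up().
 * An empty hook is still needed, as smp_cpus_done() calls into smp_ops
 * unconditionally.
 */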

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
        while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        xen_smp_intr_free(cpu);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and that we return from) expects. The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif

static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
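
/*
 * Native x86 IPI vector numbers mean nothing under Xen; xen_map_vector()
 * translates them to the XEN_*_VECTOR identifiers, which name the
 * per-CPU event channels bound in xen_smp_intr_init().  -1 marks a
 * vector with no Xen equivalent.
 */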

static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                        vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
        xen_init_spinlocks();
}

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}
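
/*
 * On PVHVM the native path (emulated local APIC and trampoline) does
 * the real bring-up; the Xen event-channel IPIs are then layered on
 * top via xen_smp_intr_init(), mirroring xen_hvm_smp_prepare_cpus()
 * above.
 */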

static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;

        rc = native_cpu_up(cpu, tidle);
        WARN_ON(xen_smp_intr_init(cpu));
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        xen_cpu_die(cpu);
        native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}