/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>
#include <asm/context_tracking.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	rcu_irq_enter();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wakeup was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
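
/*
 * Wake side of the async-PF protocol: when the host has brought the
 * missing page back in, it injects a PAGE_READY fault carrying the same
 * token that was handed to kvm_async_pf_task_wait().  A waiter that
 * went to sleep is woken through its wait queue; a waiter that halted
 * (because it could not schedule) is kicked with a reschedule IPI.
 */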

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		exception_enter(regs);
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(regs);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
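
/*
 * The setup below replaces the conventional port-based I/O delay with a
 * no-op when the host advertises KVM_FEATURE_NOP_IO_DELAY: under KVM the
 * dummy port write would only cause a pointless exit to the hypervisor,
 * so skipping it is both safe and cheaper.
 */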

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
		cpu, __pa(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = __pa(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
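
/*
 * Reading steal time uses a seqcount-like protocol: the host bumps
 * steal_time.version before and after updating the record, leaving it
 * odd while an update is in flight.  Retry until two reads of the
 * version agree and the value is even, which guarantees a consistent
 * snapshot of the steal counter.
 */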

static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	WARN_ON(kvm_register_clock("primary cpu clock"));
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, &async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

static bool __init kvm_detect(void)
{
	if (!kvm_para_available())
		return false;
	return true;
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name		= "KVM",
	.detect		= kvm_detect,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);
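
/*
 * has_steal_clock is set during early guest init, presumably before
 * jump-label patching is available, so the paravirt steal-time static
 * keys are flipped later from an arch_initcall instead.
 */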

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
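
/*
 * The paravirtual features above can be disabled individually from the
 * guest kernel command line via the early_params registered in this file:
 *
 *	no-kvmapf		disable async page fault handling
 *	no-steal-acc		disable steal-time accounting
 *	no-kvmclock-vsyscall	disable the kvmclock vsyscall time info
 */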