/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
}

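/*
 * Run the instruction emulator and translate its result into a resume
 * code: keep running the guest on success, or exit to the host to
 * complete an MMIO access or report an emulation failure.
 */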
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       vcpu->arch.last_inst);
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

void kvm_arch_hardware_enable(void *garbage)
{
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
		    (unsigned long)vcpu);

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

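/*
 * Completion helpers: once userspace has finished a DCR or MMIO read,
 * copy the returned data into the guest GPR that was recorded in
 * vcpu->arch.io_gpr when the access was first emulated.
 */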
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
	*gpr = run->dcr.data;
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

	if (run->mmio.len > sizeof(*gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 4: *gpr = *(u32 *)run->mmio.data; break;
		case 2: *gpr = *(u16 *)run->mmio.data; break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	}
}

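/*
 * Set up run->mmio so userspace can perform the access on the guest's
 * behalf.  For loads, the destination GPR and endianness are remembered
 * so the result can be written back on the next vcpu run.
 */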
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u32 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -EINVAL;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}