/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.msr & MSR_WE);
}

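/*
 * Emulate the instruction that caused the current exit. EMULATE_DONE
 * resumes the guest directly, EMULATE_DO_MMIO bounces the access out to
 * userspace via a KVM_EXIT_MMIO exit, and EMULATE_FAIL currently only
 * logs the failure (delivering a program interrupt to the guest remains
 * a TODO, see the XXX below).
 */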
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       vcpu->arch.last_inst);
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

void kvm_arch_hardware_enable(void *garbage)
{
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        int r;

        if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
                r = 0;
        else
                r = -ENOTSUPP;

        *(int *)rtn = r;
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;

        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                return ERR_PTR(-ENOMEM);

        return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvmppc_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_USER_MEMORY:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        default:
                r = 0;
                break;
        }

        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int err;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        return vcpu;

free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];

        return test_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
                    (unsigned long)vcpu);

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
        return -ENOTSUPP;
}

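/*
 * Completion helpers: when a DCR or MMIO load exits to userspace,
 * vcpu->arch.io_gpr records the destination register. On the next
 * KVM_RUN, kvm_arch_vcpu_ioctl_run() calls one of these to copy the
 * data supplied by userspace into that GPR before re-entering the guest.
 */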
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

        *gpr = run->dcr.data;
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

        if (run->mmio.len > sizeof(*gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 4: *gpr = *(u32 *)run->mmio.data; break;
                case 2: *gpr = *(u16 *)run->mmio.data; break;
                case 1: *gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: *gpr = *(u8 *)run->mmio.data; break;
                }
        }
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        /* Report 'bytes' here: run->mmio.len has not been set yet. */
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;

        return EMULATE_DO_MMIO;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u32 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        /* Report 'bytes' here: run->mmio.len has not been set yet. */
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

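/*
 * Main dispatch for the KVM_RUN ioctl: finish any load left pending by a
 * previous MMIO or DCR exit, deliver queued exceptions via
 * kvmppc_check_and_deliver_interrupts(), then enter the guest through
 * __kvmppc_vcpu_run() with host interrupts disabled.
 */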
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        }

        kvmppc_check_and_deliver_interrupts(vcpu);

        local_irq_disable();
        kvm_guest_enter();
        r = __kvmppc_vcpu_run(run, vcpu);
        kvm_guest_exit();
        local_irq_enable();

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }

        return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                break;
        }
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        long r;

        switch (ioctl) {
        default:
                r = -EINVAL;
        }

        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

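/*
 * Note: kvm_arch_init() and kvm_arch_exit() above are invoked by the
 * generic KVM core's kvm_init()/kvm_exit() (virt/kvm/kvm_main.c); the
 * module_init() hook that triggers them is expected to live in the
 * platform-specific code, not in this file.
 */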