1*fcf5ef2aSThomas Huth /* 2*fcf5ef2aSThomas Huth * This file is subject to the terms and conditions of the GNU General Public 3*fcf5ef2aSThomas Huth * License. See the file "COPYING" in the main directory of this archive 4*fcf5ef2aSThomas Huth * for more details. 5*fcf5ef2aSThomas Huth * 6*fcf5ef2aSThomas Huth * KVM/MIPS: MIPS specific KVM APIs 7*fcf5ef2aSThomas Huth * 8*fcf5ef2aSThomas Huth * Copyright (C) 2012-2014 Imagination Technologies Ltd. 9*fcf5ef2aSThomas Huth * Authors: Sanjay Lal <sanjayl@kymasys.com> 10*fcf5ef2aSThomas Huth */ 11*fcf5ef2aSThomas Huth 12*fcf5ef2aSThomas Huth #include "qemu/osdep.h" 13*fcf5ef2aSThomas Huth #include <sys/ioctl.h> 14*fcf5ef2aSThomas Huth 15*fcf5ef2aSThomas Huth #include <linux/kvm.h> 16*fcf5ef2aSThomas Huth 17*fcf5ef2aSThomas Huth #include "qemu-common.h" 18*fcf5ef2aSThomas Huth #include "cpu.h" 19*fcf5ef2aSThomas Huth #include "qemu/error-report.h" 20*fcf5ef2aSThomas Huth #include "qemu/timer.h" 21*fcf5ef2aSThomas Huth #include "sysemu/sysemu.h" 22*fcf5ef2aSThomas Huth #include "sysemu/kvm.h" 23*fcf5ef2aSThomas Huth #include "sysemu/cpus.h" 24*fcf5ef2aSThomas Huth #include "kvm_mips.h" 25*fcf5ef2aSThomas Huth #include "exec/memattrs.h" 26*fcf5ef2aSThomas Huth 27*fcf5ef2aSThomas Huth #define DEBUG_KVM 0 28*fcf5ef2aSThomas Huth 29*fcf5ef2aSThomas Huth #define DPRINTF(fmt, ...) \ 30*fcf5ef2aSThomas Huth do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0) 31*fcf5ef2aSThomas Huth 32*fcf5ef2aSThomas Huth static int kvm_mips_fpu_cap; 33*fcf5ef2aSThomas Huth static int kvm_mips_msa_cap; 34*fcf5ef2aSThomas Huth 35*fcf5ef2aSThomas Huth const KVMCapabilityInfo kvm_arch_required_capabilities[] = { 36*fcf5ef2aSThomas Huth KVM_CAP_LAST_INFO 37*fcf5ef2aSThomas Huth }; 38*fcf5ef2aSThomas Huth 39*fcf5ef2aSThomas Huth static void kvm_mips_update_state(void *opaque, int running, RunState state); 40*fcf5ef2aSThomas Huth 41*fcf5ef2aSThomas Huth unsigned long kvm_arch_vcpu_id(CPUState *cs) 42*fcf5ef2aSThomas Huth { 43*fcf5ef2aSThomas Huth return cs->cpu_index; 44*fcf5ef2aSThomas Huth } 45*fcf5ef2aSThomas Huth 46*fcf5ef2aSThomas Huth int kvm_arch_init(MachineState *ms, KVMState *s) 47*fcf5ef2aSThomas Huth { 48*fcf5ef2aSThomas Huth /* MIPS has 128 signals */ 49*fcf5ef2aSThomas Huth kvm_set_sigmask_len(s, 16); 50*fcf5ef2aSThomas Huth 51*fcf5ef2aSThomas Huth kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU); 52*fcf5ef2aSThomas Huth kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA); 53*fcf5ef2aSThomas Huth 54*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 55*fcf5ef2aSThomas Huth return 0; 56*fcf5ef2aSThomas Huth } 57*fcf5ef2aSThomas Huth 58*fcf5ef2aSThomas Huth int kvm_arch_init_vcpu(CPUState *cs) 59*fcf5ef2aSThomas Huth { 60*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 61*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 62*fcf5ef2aSThomas Huth int ret = 0; 63*fcf5ef2aSThomas Huth 64*fcf5ef2aSThomas Huth qemu_add_vm_change_state_handler(kvm_mips_update_state, cs); 65*fcf5ef2aSThomas Huth 66*fcf5ef2aSThomas Huth if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) { 67*fcf5ef2aSThomas Huth ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0); 68*fcf5ef2aSThomas Huth if (ret < 0) { 69*fcf5ef2aSThomas Huth /* mark unsupported so it gets disabled on reset */ 70*fcf5ef2aSThomas Huth kvm_mips_fpu_cap = 0; 71*fcf5ef2aSThomas Huth ret = 0; 72*fcf5ef2aSThomas Huth } 73*fcf5ef2aSThomas Huth } 74*fcf5ef2aSThomas Huth 75*fcf5ef2aSThomas Huth if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) { 76*fcf5ef2aSThomas Huth ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0); 77*fcf5ef2aSThomas Huth if (ret < 0) { 78*fcf5ef2aSThomas Huth /* mark unsupported so it gets disabled on reset */ 79*fcf5ef2aSThomas Huth kvm_mips_msa_cap = 0; 80*fcf5ef2aSThomas Huth ret = 0; 81*fcf5ef2aSThomas Huth } 82*fcf5ef2aSThomas Huth } 83*fcf5ef2aSThomas Huth 84*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 85*fcf5ef2aSThomas Huth return ret; 86*fcf5ef2aSThomas Huth } 87*fcf5ef2aSThomas Huth 88*fcf5ef2aSThomas Huth void kvm_mips_reset_vcpu(MIPSCPU *cpu) 89*fcf5ef2aSThomas Huth { 90*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 91*fcf5ef2aSThomas Huth 92*fcf5ef2aSThomas Huth if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) { 93*fcf5ef2aSThomas Huth fprintf(stderr, "Warning: KVM does not support FPU, disabling\n"); 94*fcf5ef2aSThomas Huth env->CP0_Config1 &= ~(1 << CP0C1_FP); 95*fcf5ef2aSThomas Huth } 96*fcf5ef2aSThomas Huth if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) { 97*fcf5ef2aSThomas Huth fprintf(stderr, "Warning: KVM does not support MSA, disabling\n"); 98*fcf5ef2aSThomas Huth env->CP0_Config3 &= ~(1 << CP0C3_MSAP); 99*fcf5ef2aSThomas Huth } 100*fcf5ef2aSThomas Huth 101*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 102*fcf5ef2aSThomas Huth } 103*fcf5ef2aSThomas Huth 104*fcf5ef2aSThomas Huth int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 105*fcf5ef2aSThomas Huth { 106*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 107*fcf5ef2aSThomas Huth return 0; 108*fcf5ef2aSThomas Huth } 109*fcf5ef2aSThomas Huth 110*fcf5ef2aSThomas Huth int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 111*fcf5ef2aSThomas Huth { 112*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 113*fcf5ef2aSThomas Huth return 0; 114*fcf5ef2aSThomas Huth } 115*fcf5ef2aSThomas Huth 116*fcf5ef2aSThomas Huth static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu) 117*fcf5ef2aSThomas Huth { 118*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 119*fcf5ef2aSThomas Huth 120*fcf5ef2aSThomas Huth return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP)); 121*fcf5ef2aSThomas Huth } 122*fcf5ef2aSThomas Huth 123*fcf5ef2aSThomas Huth 124*fcf5ef2aSThomas Huth void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) 125*fcf5ef2aSThomas Huth { 126*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 127*fcf5ef2aSThomas Huth int r; 128*fcf5ef2aSThomas Huth struct kvm_mips_interrupt intr; 129*fcf5ef2aSThomas Huth 130*fcf5ef2aSThomas Huth qemu_mutex_lock_iothread(); 131*fcf5ef2aSThomas Huth 132*fcf5ef2aSThomas Huth if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && 133*fcf5ef2aSThomas Huth cpu_mips_io_interrupts_pending(cpu)) { 134*fcf5ef2aSThomas Huth intr.cpu = -1; 135*fcf5ef2aSThomas Huth intr.irq = 2; 136*fcf5ef2aSThomas Huth r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr); 137*fcf5ef2aSThomas Huth if (r < 0) { 138*fcf5ef2aSThomas Huth error_report("%s: cpu %d: failed to inject IRQ %x", 139*fcf5ef2aSThomas Huth __func__, cs->cpu_index, intr.irq); 140*fcf5ef2aSThomas Huth } 141*fcf5ef2aSThomas Huth } 142*fcf5ef2aSThomas Huth 143*fcf5ef2aSThomas Huth qemu_mutex_unlock_iothread(); 144*fcf5ef2aSThomas Huth } 145*fcf5ef2aSThomas Huth 146*fcf5ef2aSThomas Huth MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) 147*fcf5ef2aSThomas Huth { 148*fcf5ef2aSThomas Huth return MEMTXATTRS_UNSPECIFIED; 149*fcf5ef2aSThomas Huth } 150*fcf5ef2aSThomas Huth 151*fcf5ef2aSThomas Huth int kvm_arch_process_async_events(CPUState *cs) 152*fcf5ef2aSThomas Huth { 153*fcf5ef2aSThomas Huth return cs->halted; 154*fcf5ef2aSThomas Huth } 155*fcf5ef2aSThomas Huth 156*fcf5ef2aSThomas Huth int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) 157*fcf5ef2aSThomas Huth { 158*fcf5ef2aSThomas Huth int ret; 159*fcf5ef2aSThomas Huth 160*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 161*fcf5ef2aSThomas Huth switch (run->exit_reason) { 162*fcf5ef2aSThomas Huth default: 163*fcf5ef2aSThomas Huth error_report("%s: unknown exit reason %d", 164*fcf5ef2aSThomas Huth __func__, run->exit_reason); 165*fcf5ef2aSThomas Huth ret = -1; 166*fcf5ef2aSThomas Huth break; 167*fcf5ef2aSThomas Huth } 168*fcf5ef2aSThomas Huth 169*fcf5ef2aSThomas Huth return ret; 170*fcf5ef2aSThomas Huth } 171*fcf5ef2aSThomas Huth 172*fcf5ef2aSThomas Huth bool kvm_arch_stop_on_emulation_error(CPUState *cs) 173*fcf5ef2aSThomas Huth { 174*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 175*fcf5ef2aSThomas Huth return true; 176*fcf5ef2aSThomas Huth } 177*fcf5ef2aSThomas Huth 178*fcf5ef2aSThomas Huth int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr) 179*fcf5ef2aSThomas Huth { 180*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 181*fcf5ef2aSThomas Huth return 1; 182*fcf5ef2aSThomas Huth } 183*fcf5ef2aSThomas Huth 184*fcf5ef2aSThomas Huth int kvm_arch_on_sigbus(int code, void *addr) 185*fcf5ef2aSThomas Huth { 186*fcf5ef2aSThomas Huth DPRINTF("%s\n", __func__); 187*fcf5ef2aSThomas Huth return 1; 188*fcf5ef2aSThomas Huth } 189*fcf5ef2aSThomas Huth 190*fcf5ef2aSThomas Huth void kvm_arch_init_irq_routing(KVMState *s) 191*fcf5ef2aSThomas Huth { 192*fcf5ef2aSThomas Huth } 193*fcf5ef2aSThomas Huth 194*fcf5ef2aSThomas Huth int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level) 195*fcf5ef2aSThomas Huth { 196*fcf5ef2aSThomas Huth CPUState *cs = CPU(cpu); 197*fcf5ef2aSThomas Huth struct kvm_mips_interrupt intr; 198*fcf5ef2aSThomas Huth 199*fcf5ef2aSThomas Huth if (!kvm_enabled()) { 200*fcf5ef2aSThomas Huth return 0; 201*fcf5ef2aSThomas Huth } 202*fcf5ef2aSThomas Huth 203*fcf5ef2aSThomas Huth intr.cpu = -1; 204*fcf5ef2aSThomas Huth 205*fcf5ef2aSThomas Huth if (level) { 206*fcf5ef2aSThomas Huth intr.irq = irq; 207*fcf5ef2aSThomas Huth } else { 208*fcf5ef2aSThomas Huth intr.irq = -irq; 209*fcf5ef2aSThomas Huth } 210*fcf5ef2aSThomas Huth 211*fcf5ef2aSThomas Huth kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr); 212*fcf5ef2aSThomas Huth 213*fcf5ef2aSThomas Huth return 0; 214*fcf5ef2aSThomas Huth } 215*fcf5ef2aSThomas Huth 216*fcf5ef2aSThomas Huth int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level) 217*fcf5ef2aSThomas Huth { 218*fcf5ef2aSThomas Huth CPUState *cs = current_cpu; 219*fcf5ef2aSThomas Huth CPUState *dest_cs = CPU(cpu); 220*fcf5ef2aSThomas Huth struct kvm_mips_interrupt intr; 221*fcf5ef2aSThomas Huth 222*fcf5ef2aSThomas Huth if (!kvm_enabled()) { 223*fcf5ef2aSThomas Huth return 0; 224*fcf5ef2aSThomas Huth } 225*fcf5ef2aSThomas Huth 226*fcf5ef2aSThomas Huth intr.cpu = dest_cs->cpu_index; 227*fcf5ef2aSThomas Huth 228*fcf5ef2aSThomas Huth if (level) { 229*fcf5ef2aSThomas Huth intr.irq = irq; 230*fcf5ef2aSThomas Huth } else { 231*fcf5ef2aSThomas Huth intr.irq = -irq; 232*fcf5ef2aSThomas Huth } 233*fcf5ef2aSThomas Huth 234*fcf5ef2aSThomas Huth DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq); 235*fcf5ef2aSThomas Huth 236*fcf5ef2aSThomas Huth kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr); 237*fcf5ef2aSThomas Huth 238*fcf5ef2aSThomas Huth return 0; 239*fcf5ef2aSThomas Huth } 240*fcf5ef2aSThomas Huth 241*fcf5ef2aSThomas Huth #define MIPS_CP0_32(_R, _S) \ 242*fcf5ef2aSThomas Huth (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S))) 243*fcf5ef2aSThomas Huth 244*fcf5ef2aSThomas Huth #define MIPS_CP0_64(_R, _S) \ 245*fcf5ef2aSThomas Huth (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S))) 246*fcf5ef2aSThomas Huth 247*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) 248*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0) 249*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2) 250*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0) 251*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0) 252*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0) 253*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0) 254*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0) 255*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) 256*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) 257*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) 258*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) 259*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0) 260*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0) 261*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) 262*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) 263*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) 264*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) 265*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4) 266*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5) 267*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) 268*fcf5ef2aSThomas Huth 269*fcf5ef2aSThomas Huth static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id, 270*fcf5ef2aSThomas Huth int32_t *addr) 271*fcf5ef2aSThomas Huth { 272*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 273*fcf5ef2aSThomas Huth .id = reg_id, 274*fcf5ef2aSThomas Huth .addr = (uintptr_t)addr 275*fcf5ef2aSThomas Huth }; 276*fcf5ef2aSThomas Huth 277*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg); 278*fcf5ef2aSThomas Huth } 279*fcf5ef2aSThomas Huth 280*fcf5ef2aSThomas Huth static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id, 281*fcf5ef2aSThomas Huth uint32_t *addr) 282*fcf5ef2aSThomas Huth { 283*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 284*fcf5ef2aSThomas Huth .id = reg_id, 285*fcf5ef2aSThomas Huth .addr = (uintptr_t)addr 286*fcf5ef2aSThomas Huth }; 287*fcf5ef2aSThomas Huth 288*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg); 289*fcf5ef2aSThomas Huth } 290*fcf5ef2aSThomas Huth 291*fcf5ef2aSThomas Huth static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id, 292*fcf5ef2aSThomas Huth target_ulong *addr) 293*fcf5ef2aSThomas Huth { 294*fcf5ef2aSThomas Huth uint64_t val64 = *addr; 295*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 296*fcf5ef2aSThomas Huth .id = reg_id, 297*fcf5ef2aSThomas Huth .addr = (uintptr_t)&val64 298*fcf5ef2aSThomas Huth }; 299*fcf5ef2aSThomas Huth 300*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg); 301*fcf5ef2aSThomas Huth } 302*fcf5ef2aSThomas Huth 303*fcf5ef2aSThomas Huth static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id, 304*fcf5ef2aSThomas Huth int64_t *addr) 305*fcf5ef2aSThomas Huth { 306*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 307*fcf5ef2aSThomas Huth .id = reg_id, 308*fcf5ef2aSThomas Huth .addr = (uintptr_t)addr 309*fcf5ef2aSThomas Huth }; 310*fcf5ef2aSThomas Huth 311*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg); 312*fcf5ef2aSThomas Huth } 313*fcf5ef2aSThomas Huth 314*fcf5ef2aSThomas Huth static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id, 315*fcf5ef2aSThomas Huth uint64_t *addr) 316*fcf5ef2aSThomas Huth { 317*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 318*fcf5ef2aSThomas Huth .id = reg_id, 319*fcf5ef2aSThomas Huth .addr = (uintptr_t)addr 320*fcf5ef2aSThomas Huth }; 321*fcf5ef2aSThomas Huth 322*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg); 323*fcf5ef2aSThomas Huth } 324*fcf5ef2aSThomas Huth 325*fcf5ef2aSThomas Huth static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id, 326*fcf5ef2aSThomas Huth int32_t *addr) 327*fcf5ef2aSThomas Huth { 328*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 329*fcf5ef2aSThomas Huth .id = reg_id, 330*fcf5ef2aSThomas Huth .addr = (uintptr_t)addr 331*fcf5ef2aSThomas Huth }; 332*fcf5ef2aSThomas Huth 333*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); 334*fcf5ef2aSThomas Huth } 335*fcf5ef2aSThomas Huth 336*fcf5ef2aSThomas Huth static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id, 337*fcf5ef2aSThomas Huth uint32_t *addr) 338*fcf5ef2aSThomas Huth { 339*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 340*fcf5ef2aSThomas Huth .id = reg_id, 341*fcf5ef2aSThomas Huth .addr = (uintptr_t)addr 342*fcf5ef2aSThomas Huth }; 343*fcf5ef2aSThomas Huth 344*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); 345*fcf5ef2aSThomas Huth } 346*fcf5ef2aSThomas Huth 347*fcf5ef2aSThomas Huth static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id, 348*fcf5ef2aSThomas Huth target_ulong *addr) 349*fcf5ef2aSThomas Huth { 350*fcf5ef2aSThomas Huth int ret; 351*fcf5ef2aSThomas Huth uint64_t val64 = 0; 352*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 353*fcf5ef2aSThomas Huth .id = reg_id, 354*fcf5ef2aSThomas Huth .addr = (uintptr_t)&val64 355*fcf5ef2aSThomas Huth }; 356*fcf5ef2aSThomas Huth 357*fcf5ef2aSThomas Huth ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); 358*fcf5ef2aSThomas Huth if (ret >= 0) { 359*fcf5ef2aSThomas Huth *addr = val64; 360*fcf5ef2aSThomas Huth } 361*fcf5ef2aSThomas Huth return ret; 362*fcf5ef2aSThomas Huth } 363*fcf5ef2aSThomas Huth 364*fcf5ef2aSThomas Huth static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id, 365*fcf5ef2aSThomas Huth int64_t *addr) 366*fcf5ef2aSThomas Huth { 367*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 368*fcf5ef2aSThomas Huth .id = reg_id, 369*fcf5ef2aSThomas Huth .addr = (uintptr_t)addr 370*fcf5ef2aSThomas Huth }; 371*fcf5ef2aSThomas Huth 372*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); 373*fcf5ef2aSThomas Huth } 374*fcf5ef2aSThomas Huth 375*fcf5ef2aSThomas Huth static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id, 376*fcf5ef2aSThomas Huth uint64_t *addr) 377*fcf5ef2aSThomas Huth { 378*fcf5ef2aSThomas Huth struct kvm_one_reg cp0reg = { 379*fcf5ef2aSThomas Huth .id = reg_id, 380*fcf5ef2aSThomas Huth .addr = (uintptr_t)addr 381*fcf5ef2aSThomas Huth }; 382*fcf5ef2aSThomas Huth 383*fcf5ef2aSThomas Huth return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg); 384*fcf5ef2aSThomas Huth } 385*fcf5ef2aSThomas Huth 386*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG_MASK (1U << CP0C0_M) 387*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG1_MASK ((1U << CP0C1_M) | \ 388*fcf5ef2aSThomas Huth (1U << CP0C1_FP)) 389*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG2_MASK (1U << CP0C2_M) 390*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG3_MASK ((1U << CP0C3_M) | \ 391*fcf5ef2aSThomas Huth (1U << CP0C3_MSAP)) 392*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG4_MASK (1U << CP0C4_M) 393*fcf5ef2aSThomas Huth #define KVM_REG_MIPS_CP0_CONFIG5_MASK ((1U << CP0C5_MSAEn) | \ 394*fcf5ef2aSThomas Huth (1U << CP0C5_UFE) | \ 395*fcf5ef2aSThomas Huth (1U << CP0C5_FRE) | \ 396*fcf5ef2aSThomas Huth (1U << CP0C5_UFR)) 397*fcf5ef2aSThomas Huth 398*fcf5ef2aSThomas Huth static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id, 399*fcf5ef2aSThomas Huth int32_t *addr, int32_t mask) 400*fcf5ef2aSThomas Huth { 401*fcf5ef2aSThomas Huth int err; 402*fcf5ef2aSThomas Huth int32_t tmp, change; 403*fcf5ef2aSThomas Huth 404*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, reg_id, &tmp); 405*fcf5ef2aSThomas Huth if (err < 0) { 406*fcf5ef2aSThomas Huth return err; 407*fcf5ef2aSThomas Huth } 408*fcf5ef2aSThomas Huth 409*fcf5ef2aSThomas Huth /* only change bits in mask */ 410*fcf5ef2aSThomas Huth change = (*addr ^ tmp) & mask; 411*fcf5ef2aSThomas Huth if (!change) { 412*fcf5ef2aSThomas Huth return 0; 413*fcf5ef2aSThomas Huth } 414*fcf5ef2aSThomas Huth 415*fcf5ef2aSThomas Huth tmp = tmp ^ change; 416*fcf5ef2aSThomas Huth return kvm_mips_put_one_reg(cs, reg_id, &tmp); 417*fcf5ef2aSThomas Huth } 418*fcf5ef2aSThomas Huth 419*fcf5ef2aSThomas Huth /* 420*fcf5ef2aSThomas Huth * We freeze the KVM timer when either the VM clock is stopped or the state is 421*fcf5ef2aSThomas Huth * saved (the state is dirty). 422*fcf5ef2aSThomas Huth */ 423*fcf5ef2aSThomas Huth 424*fcf5ef2aSThomas Huth /* 425*fcf5ef2aSThomas Huth * Save the state of the KVM timer when VM clock is stopped or state is synced 426*fcf5ef2aSThomas Huth * to QEMU. 427*fcf5ef2aSThomas Huth */ 428*fcf5ef2aSThomas Huth static int kvm_mips_save_count(CPUState *cs) 429*fcf5ef2aSThomas Huth { 430*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 431*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 432*fcf5ef2aSThomas Huth uint64_t count_ctl; 433*fcf5ef2aSThomas Huth int err, ret = 0; 434*fcf5ef2aSThomas Huth 435*fcf5ef2aSThomas Huth /* freeze KVM timer */ 436*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); 437*fcf5ef2aSThomas Huth if (err < 0) { 438*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err); 439*fcf5ef2aSThomas Huth ret = err; 440*fcf5ef2aSThomas Huth } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) { 441*fcf5ef2aSThomas Huth count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC; 442*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); 443*fcf5ef2aSThomas Huth if (err < 0) { 444*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err); 445*fcf5ef2aSThomas Huth ret = err; 446*fcf5ef2aSThomas Huth } 447*fcf5ef2aSThomas Huth } 448*fcf5ef2aSThomas Huth 449*fcf5ef2aSThomas Huth /* read CP0_Cause */ 450*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause); 451*fcf5ef2aSThomas Huth if (err < 0) { 452*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err); 453*fcf5ef2aSThomas Huth ret = err; 454*fcf5ef2aSThomas Huth } 455*fcf5ef2aSThomas Huth 456*fcf5ef2aSThomas Huth /* read CP0_Count */ 457*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count); 458*fcf5ef2aSThomas Huth if (err < 0) { 459*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err); 460*fcf5ef2aSThomas Huth ret = err; 461*fcf5ef2aSThomas Huth } 462*fcf5ef2aSThomas Huth 463*fcf5ef2aSThomas Huth return ret; 464*fcf5ef2aSThomas Huth } 465*fcf5ef2aSThomas Huth 466*fcf5ef2aSThomas Huth /* 467*fcf5ef2aSThomas Huth * Restore the state of the KVM timer when VM clock is restarted or state is 468*fcf5ef2aSThomas Huth * synced to KVM. 469*fcf5ef2aSThomas Huth */ 470*fcf5ef2aSThomas Huth static int kvm_mips_restore_count(CPUState *cs) 471*fcf5ef2aSThomas Huth { 472*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 473*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 474*fcf5ef2aSThomas Huth uint64_t count_ctl; 475*fcf5ef2aSThomas Huth int err_dc, err, ret = 0; 476*fcf5ef2aSThomas Huth 477*fcf5ef2aSThomas Huth /* check the timer is frozen */ 478*fcf5ef2aSThomas Huth err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); 479*fcf5ef2aSThomas Huth if (err_dc < 0) { 480*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc); 481*fcf5ef2aSThomas Huth ret = err_dc; 482*fcf5ef2aSThomas Huth } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) { 483*fcf5ef2aSThomas Huth /* freeze timer (sets COUNT_RESUME for us) */ 484*fcf5ef2aSThomas Huth count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC; 485*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); 486*fcf5ef2aSThomas Huth if (err < 0) { 487*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err); 488*fcf5ef2aSThomas Huth ret = err; 489*fcf5ef2aSThomas Huth } 490*fcf5ef2aSThomas Huth } 491*fcf5ef2aSThomas Huth 492*fcf5ef2aSThomas Huth /* load CP0_Cause */ 493*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause); 494*fcf5ef2aSThomas Huth if (err < 0) { 495*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err); 496*fcf5ef2aSThomas Huth ret = err; 497*fcf5ef2aSThomas Huth } 498*fcf5ef2aSThomas Huth 499*fcf5ef2aSThomas Huth /* load CP0_Count */ 500*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count); 501*fcf5ef2aSThomas Huth if (err < 0) { 502*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err); 503*fcf5ef2aSThomas Huth ret = err; 504*fcf5ef2aSThomas Huth } 505*fcf5ef2aSThomas Huth 506*fcf5ef2aSThomas Huth /* resume KVM timer */ 507*fcf5ef2aSThomas Huth if (err_dc >= 0) { 508*fcf5ef2aSThomas Huth count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC; 509*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl); 510*fcf5ef2aSThomas Huth if (err < 0) { 511*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err); 512*fcf5ef2aSThomas Huth ret = err; 513*fcf5ef2aSThomas Huth } 514*fcf5ef2aSThomas Huth } 515*fcf5ef2aSThomas Huth 516*fcf5ef2aSThomas Huth return ret; 517*fcf5ef2aSThomas Huth } 518*fcf5ef2aSThomas Huth 519*fcf5ef2aSThomas Huth /* 520*fcf5ef2aSThomas Huth * Handle the VM clock being started or stopped 521*fcf5ef2aSThomas Huth */ 522*fcf5ef2aSThomas Huth static void kvm_mips_update_state(void *opaque, int running, RunState state) 523*fcf5ef2aSThomas Huth { 524*fcf5ef2aSThomas Huth CPUState *cs = opaque; 525*fcf5ef2aSThomas Huth int ret; 526*fcf5ef2aSThomas Huth uint64_t count_resume; 527*fcf5ef2aSThomas Huth 528*fcf5ef2aSThomas Huth /* 529*fcf5ef2aSThomas Huth * If state is already dirty (synced to QEMU) then the KVM timer state is 530*fcf5ef2aSThomas Huth * already saved and can be restored when it is synced back to KVM. 531*fcf5ef2aSThomas Huth */ 532*fcf5ef2aSThomas Huth if (!running) { 533*fcf5ef2aSThomas Huth if (!cs->kvm_vcpu_dirty) { 534*fcf5ef2aSThomas Huth ret = kvm_mips_save_count(cs); 535*fcf5ef2aSThomas Huth if (ret < 0) { 536*fcf5ef2aSThomas Huth fprintf(stderr, "Failed saving count\n"); 537*fcf5ef2aSThomas Huth } 538*fcf5ef2aSThomas Huth } 539*fcf5ef2aSThomas Huth } else { 540*fcf5ef2aSThomas Huth /* Set clock restore time to now */ 541*fcf5ef2aSThomas Huth count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 542*fcf5ef2aSThomas Huth ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME, 543*fcf5ef2aSThomas Huth &count_resume); 544*fcf5ef2aSThomas Huth if (ret < 0) { 545*fcf5ef2aSThomas Huth fprintf(stderr, "Failed setting COUNT_RESUME\n"); 546*fcf5ef2aSThomas Huth return; 547*fcf5ef2aSThomas Huth } 548*fcf5ef2aSThomas Huth 549*fcf5ef2aSThomas Huth if (!cs->kvm_vcpu_dirty) { 550*fcf5ef2aSThomas Huth ret = kvm_mips_restore_count(cs); 551*fcf5ef2aSThomas Huth if (ret < 0) { 552*fcf5ef2aSThomas Huth fprintf(stderr, "Failed restoring count\n"); 553*fcf5ef2aSThomas Huth } 554*fcf5ef2aSThomas Huth } 555*fcf5ef2aSThomas Huth } 556*fcf5ef2aSThomas Huth } 557*fcf5ef2aSThomas Huth 558*fcf5ef2aSThomas Huth static int kvm_mips_put_fpu_registers(CPUState *cs, int level) 559*fcf5ef2aSThomas Huth { 560*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 561*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 562*fcf5ef2aSThomas Huth int err, ret = 0; 563*fcf5ef2aSThomas Huth unsigned int i; 564*fcf5ef2aSThomas Huth 565*fcf5ef2aSThomas Huth /* Only put FPU state if we're emulating a CPU with an FPU */ 566*fcf5ef2aSThomas Huth if (env->CP0_Config1 & (1 << CP0C1_FP)) { 567*fcf5ef2aSThomas Huth /* FPU Control Registers */ 568*fcf5ef2aSThomas Huth if (level == KVM_PUT_FULL_STATE) { 569*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR, 570*fcf5ef2aSThomas Huth &env->active_fpu.fcr0); 571*fcf5ef2aSThomas Huth if (err < 0) { 572*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err); 573*fcf5ef2aSThomas Huth ret = err; 574*fcf5ef2aSThomas Huth } 575*fcf5ef2aSThomas Huth } 576*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR, 577*fcf5ef2aSThomas Huth &env->active_fpu.fcr31); 578*fcf5ef2aSThomas Huth if (err < 0) { 579*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err); 580*fcf5ef2aSThomas Huth ret = err; 581*fcf5ef2aSThomas Huth } 582*fcf5ef2aSThomas Huth 583*fcf5ef2aSThomas Huth /* 584*fcf5ef2aSThomas Huth * FPU register state is a subset of MSA vector state, so don't put FPU 585*fcf5ef2aSThomas Huth * registers if we're emulating a CPU with MSA. 586*fcf5ef2aSThomas Huth */ 587*fcf5ef2aSThomas Huth if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) { 588*fcf5ef2aSThomas Huth /* Floating point registers */ 589*fcf5ef2aSThomas Huth for (i = 0; i < 32; ++i) { 590*fcf5ef2aSThomas Huth if (env->CP0_Status & (1 << CP0St_FR)) { 591*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i), 592*fcf5ef2aSThomas Huth &env->active_fpu.fpr[i].d); 593*fcf5ef2aSThomas Huth } else { 594*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i), 595*fcf5ef2aSThomas Huth &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]); 596*fcf5ef2aSThomas Huth } 597*fcf5ef2aSThomas Huth if (err < 0) { 598*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err); 599*fcf5ef2aSThomas Huth ret = err; 600*fcf5ef2aSThomas Huth } 601*fcf5ef2aSThomas Huth } 602*fcf5ef2aSThomas Huth } 603*fcf5ef2aSThomas Huth } 604*fcf5ef2aSThomas Huth 605*fcf5ef2aSThomas Huth /* Only put MSA state if we're emulating a CPU with MSA */ 606*fcf5ef2aSThomas Huth if (env->CP0_Config3 & (1 << CP0C3_MSAP)) { 607*fcf5ef2aSThomas Huth /* MSA Control Registers */ 608*fcf5ef2aSThomas Huth if (level == KVM_PUT_FULL_STATE) { 609*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR, 610*fcf5ef2aSThomas Huth &env->msair); 611*fcf5ef2aSThomas Huth if (err < 0) { 612*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err); 613*fcf5ef2aSThomas Huth ret = err; 614*fcf5ef2aSThomas Huth } 615*fcf5ef2aSThomas Huth } 616*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR, 617*fcf5ef2aSThomas Huth &env->active_tc.msacsr); 618*fcf5ef2aSThomas Huth if (err < 0) { 619*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err); 620*fcf5ef2aSThomas Huth ret = err; 621*fcf5ef2aSThomas Huth } 622*fcf5ef2aSThomas Huth 623*fcf5ef2aSThomas Huth /* Vector registers (includes FP registers) */ 624*fcf5ef2aSThomas Huth for (i = 0; i < 32; ++i) { 625*fcf5ef2aSThomas Huth /* Big endian MSA not supported by QEMU yet anyway */ 626*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i), 627*fcf5ef2aSThomas Huth env->active_fpu.fpr[i].wr.d); 628*fcf5ef2aSThomas Huth if (err < 0) { 629*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err); 630*fcf5ef2aSThomas Huth ret = err; 631*fcf5ef2aSThomas Huth } 632*fcf5ef2aSThomas Huth } 633*fcf5ef2aSThomas Huth } 634*fcf5ef2aSThomas Huth 635*fcf5ef2aSThomas Huth return ret; 636*fcf5ef2aSThomas Huth } 637*fcf5ef2aSThomas Huth 638*fcf5ef2aSThomas Huth static int kvm_mips_get_fpu_registers(CPUState *cs) 639*fcf5ef2aSThomas Huth { 640*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 641*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 642*fcf5ef2aSThomas Huth int err, ret = 0; 643*fcf5ef2aSThomas Huth unsigned int i; 644*fcf5ef2aSThomas Huth 645*fcf5ef2aSThomas Huth /* Only get FPU state if we're emulating a CPU with an FPU */ 646*fcf5ef2aSThomas Huth if (env->CP0_Config1 & (1 << CP0C1_FP)) { 647*fcf5ef2aSThomas Huth /* FPU Control Registers */ 648*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR, 649*fcf5ef2aSThomas Huth &env->active_fpu.fcr0); 650*fcf5ef2aSThomas Huth if (err < 0) { 651*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err); 652*fcf5ef2aSThomas Huth ret = err; 653*fcf5ef2aSThomas Huth } 654*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR, 655*fcf5ef2aSThomas Huth &env->active_fpu.fcr31); 656*fcf5ef2aSThomas Huth if (err < 0) { 657*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err); 658*fcf5ef2aSThomas Huth ret = err; 659*fcf5ef2aSThomas Huth } else { 660*fcf5ef2aSThomas Huth restore_fp_status(env); 661*fcf5ef2aSThomas Huth } 662*fcf5ef2aSThomas Huth 663*fcf5ef2aSThomas Huth /* 664*fcf5ef2aSThomas Huth * FPU register state is a subset of MSA vector state, so don't save FPU 665*fcf5ef2aSThomas Huth * registers if we're emulating a CPU with MSA. 666*fcf5ef2aSThomas Huth */ 667*fcf5ef2aSThomas Huth if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) { 668*fcf5ef2aSThomas Huth /* Floating point registers */ 669*fcf5ef2aSThomas Huth for (i = 0; i < 32; ++i) { 670*fcf5ef2aSThomas Huth if (env->CP0_Status & (1 << CP0St_FR)) { 671*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i), 672*fcf5ef2aSThomas Huth &env->active_fpu.fpr[i].d); 673*fcf5ef2aSThomas Huth } else { 674*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i), 675*fcf5ef2aSThomas Huth &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]); 676*fcf5ef2aSThomas Huth } 677*fcf5ef2aSThomas Huth if (err < 0) { 678*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err); 679*fcf5ef2aSThomas Huth ret = err; 680*fcf5ef2aSThomas Huth } 681*fcf5ef2aSThomas Huth } 682*fcf5ef2aSThomas Huth } 683*fcf5ef2aSThomas Huth } 684*fcf5ef2aSThomas Huth 685*fcf5ef2aSThomas Huth /* Only get MSA state if we're emulating a CPU with MSA */ 686*fcf5ef2aSThomas Huth if (env->CP0_Config3 & (1 << CP0C3_MSAP)) { 687*fcf5ef2aSThomas Huth /* MSA Control Registers */ 688*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR, 689*fcf5ef2aSThomas Huth &env->msair); 690*fcf5ef2aSThomas Huth if (err < 0) { 691*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err); 692*fcf5ef2aSThomas Huth ret = err; 693*fcf5ef2aSThomas Huth } 694*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR, 695*fcf5ef2aSThomas Huth &env->active_tc.msacsr); 696*fcf5ef2aSThomas Huth if (err < 0) { 697*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err); 698*fcf5ef2aSThomas Huth ret = err; 699*fcf5ef2aSThomas Huth } else { 700*fcf5ef2aSThomas Huth restore_msa_fp_status(env); 701*fcf5ef2aSThomas Huth } 702*fcf5ef2aSThomas Huth 703*fcf5ef2aSThomas Huth /* Vector registers (includes FP registers) */ 704*fcf5ef2aSThomas Huth for (i = 0; i < 32; ++i) { 705*fcf5ef2aSThomas Huth /* Big endian MSA not supported by QEMU yet anyway */ 706*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i), 707*fcf5ef2aSThomas Huth env->active_fpu.fpr[i].wr.d); 708*fcf5ef2aSThomas Huth if (err < 0) { 709*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err); 710*fcf5ef2aSThomas Huth ret = err; 711*fcf5ef2aSThomas Huth } 712*fcf5ef2aSThomas Huth } 713*fcf5ef2aSThomas Huth } 714*fcf5ef2aSThomas Huth 715*fcf5ef2aSThomas Huth return ret; 716*fcf5ef2aSThomas Huth } 717*fcf5ef2aSThomas Huth 718*fcf5ef2aSThomas Huth 719*fcf5ef2aSThomas Huth static int kvm_mips_put_cp0_registers(CPUState *cs, int level) 720*fcf5ef2aSThomas Huth { 721*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 722*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 723*fcf5ef2aSThomas Huth int err, ret = 0; 724*fcf5ef2aSThomas Huth 725*fcf5ef2aSThomas Huth (void)level; 726*fcf5ef2aSThomas Huth 727*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index); 728*fcf5ef2aSThomas Huth if (err < 0) { 729*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err); 730*fcf5ef2aSThomas Huth ret = err; 731*fcf5ef2aSThomas Huth } 732*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT, 733*fcf5ef2aSThomas Huth &env->CP0_Context); 734*fcf5ef2aSThomas Huth if (err < 0) { 735*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err); 736*fcf5ef2aSThomas Huth ret = err; 737*fcf5ef2aSThomas Huth } 738*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL, 739*fcf5ef2aSThomas Huth &env->active_tc.CP0_UserLocal); 740*fcf5ef2aSThomas Huth if (err < 0) { 741*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err); 742*fcf5ef2aSThomas Huth ret = err; 743*fcf5ef2aSThomas Huth } 744*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK, 745*fcf5ef2aSThomas Huth &env->CP0_PageMask); 746*fcf5ef2aSThomas Huth if (err < 0) { 747*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err); 748*fcf5ef2aSThomas Huth ret = err; 749*fcf5ef2aSThomas Huth } 750*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired); 751*fcf5ef2aSThomas Huth if (err < 0) { 752*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err); 753*fcf5ef2aSThomas Huth ret = err; 754*fcf5ef2aSThomas Huth } 755*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna); 756*fcf5ef2aSThomas Huth if (err < 0) { 757*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err); 758*fcf5ef2aSThomas Huth ret = err; 759*fcf5ef2aSThomas Huth } 760*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR, 761*fcf5ef2aSThomas Huth &env->CP0_BadVAddr); 762*fcf5ef2aSThomas Huth if (err < 0) { 763*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err); 764*fcf5ef2aSThomas Huth ret = err; 765*fcf5ef2aSThomas Huth } 766*fcf5ef2aSThomas Huth 767*fcf5ef2aSThomas Huth /* If VM clock stopped then state will be restored when it is restarted */ 768*fcf5ef2aSThomas Huth if (runstate_is_running()) { 769*fcf5ef2aSThomas Huth err = kvm_mips_restore_count(cs); 770*fcf5ef2aSThomas Huth if (err < 0) { 771*fcf5ef2aSThomas Huth ret = err; 772*fcf5ef2aSThomas Huth } 773*fcf5ef2aSThomas Huth } 774*fcf5ef2aSThomas Huth 775*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI, 776*fcf5ef2aSThomas Huth &env->CP0_EntryHi); 777*fcf5ef2aSThomas Huth if (err < 0) { 778*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err); 779*fcf5ef2aSThomas Huth ret = err; 780*fcf5ef2aSThomas Huth } 781*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE, 782*fcf5ef2aSThomas Huth &env->CP0_Compare); 783*fcf5ef2aSThomas Huth if (err < 0) { 784*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err); 785*fcf5ef2aSThomas Huth ret = err; 786*fcf5ef2aSThomas Huth } 787*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status); 788*fcf5ef2aSThomas Huth if (err < 0) { 789*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err); 790*fcf5ef2aSThomas Huth ret = err; 791*fcf5ef2aSThomas Huth } 792*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC); 793*fcf5ef2aSThomas Huth if (err < 0) { 794*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err); 795*fcf5ef2aSThomas Huth ret = err; 796*fcf5ef2aSThomas Huth } 797*fcf5ef2aSThomas Huth err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid); 798*fcf5ef2aSThomas Huth if (err < 0) { 799*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err); 800*fcf5ef2aSThomas Huth ret = err; 801*fcf5ef2aSThomas Huth } 802*fcf5ef2aSThomas Huth err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, 803*fcf5ef2aSThomas Huth &env->CP0_Config0, 804*fcf5ef2aSThomas Huth KVM_REG_MIPS_CP0_CONFIG_MASK); 805*fcf5ef2aSThomas Huth if (err < 0) { 806*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err); 807*fcf5ef2aSThomas Huth ret = err; 808*fcf5ef2aSThomas Huth } 809*fcf5ef2aSThomas Huth err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, 810*fcf5ef2aSThomas Huth &env->CP0_Config1, 811*fcf5ef2aSThomas Huth KVM_REG_MIPS_CP0_CONFIG1_MASK); 812*fcf5ef2aSThomas Huth if (err < 0) { 813*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err); 814*fcf5ef2aSThomas Huth ret = err; 815*fcf5ef2aSThomas Huth } 816*fcf5ef2aSThomas Huth err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, 817*fcf5ef2aSThomas Huth &env->CP0_Config2, 818*fcf5ef2aSThomas Huth KVM_REG_MIPS_CP0_CONFIG2_MASK); 819*fcf5ef2aSThomas Huth if (err < 0) { 820*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err); 821*fcf5ef2aSThomas Huth ret = err; 822*fcf5ef2aSThomas Huth } 823*fcf5ef2aSThomas Huth err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, 824*fcf5ef2aSThomas Huth &env->CP0_Config3, 825*fcf5ef2aSThomas Huth KVM_REG_MIPS_CP0_CONFIG3_MASK); 826*fcf5ef2aSThomas Huth if (err < 0) { 827*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err); 828*fcf5ef2aSThomas Huth ret = err; 829*fcf5ef2aSThomas Huth } 830*fcf5ef2aSThomas Huth err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, 831*fcf5ef2aSThomas Huth &env->CP0_Config4, 832*fcf5ef2aSThomas Huth KVM_REG_MIPS_CP0_CONFIG4_MASK); 833*fcf5ef2aSThomas Huth if (err < 0) { 834*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err); 835*fcf5ef2aSThomas Huth ret = err; 836*fcf5ef2aSThomas Huth } 837*fcf5ef2aSThomas Huth err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, 838*fcf5ef2aSThomas Huth &env->CP0_Config5, 839*fcf5ef2aSThomas Huth KVM_REG_MIPS_CP0_CONFIG5_MASK); 840*fcf5ef2aSThomas Huth if (err < 0) { 841*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err); 842*fcf5ef2aSThomas Huth ret = err; 843*fcf5ef2aSThomas Huth } 844*fcf5ef2aSThomas Huth err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC, 845*fcf5ef2aSThomas Huth &env->CP0_ErrorEPC); 846*fcf5ef2aSThomas Huth if (err < 0) { 847*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err); 848*fcf5ef2aSThomas Huth ret = err; 849*fcf5ef2aSThomas Huth } 850*fcf5ef2aSThomas Huth 851*fcf5ef2aSThomas Huth return ret; 852*fcf5ef2aSThomas Huth } 853*fcf5ef2aSThomas Huth 854*fcf5ef2aSThomas Huth static int kvm_mips_get_cp0_registers(CPUState *cs) 855*fcf5ef2aSThomas Huth { 856*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 857*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 858*fcf5ef2aSThomas Huth int err, ret = 0; 859*fcf5ef2aSThomas Huth 860*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index); 861*fcf5ef2aSThomas Huth if (err < 0) { 862*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err); 863*fcf5ef2aSThomas Huth ret = err; 864*fcf5ef2aSThomas Huth } 865*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT, 866*fcf5ef2aSThomas Huth &env->CP0_Context); 867*fcf5ef2aSThomas Huth if (err < 0) { 868*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err); 869*fcf5ef2aSThomas Huth ret = err; 870*fcf5ef2aSThomas Huth } 871*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL, 872*fcf5ef2aSThomas Huth &env->active_tc.CP0_UserLocal); 873*fcf5ef2aSThomas Huth if (err < 0) { 874*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err); 875*fcf5ef2aSThomas Huth ret = err; 876*fcf5ef2aSThomas Huth } 877*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK, 878*fcf5ef2aSThomas Huth &env->CP0_PageMask); 879*fcf5ef2aSThomas Huth if (err < 0) { 880*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err); 881*fcf5ef2aSThomas Huth ret = err; 882*fcf5ef2aSThomas Huth } 883*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired); 884*fcf5ef2aSThomas Huth if (err < 0) { 885*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err); 886*fcf5ef2aSThomas Huth ret = err; 887*fcf5ef2aSThomas Huth } 888*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna); 889*fcf5ef2aSThomas Huth if (err < 0) { 890*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err); 891*fcf5ef2aSThomas Huth ret = err; 892*fcf5ef2aSThomas Huth } 893*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR, 894*fcf5ef2aSThomas Huth &env->CP0_BadVAddr); 895*fcf5ef2aSThomas Huth if (err < 0) { 896*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err); 897*fcf5ef2aSThomas Huth ret = err; 898*fcf5ef2aSThomas Huth } 899*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI, 900*fcf5ef2aSThomas Huth &env->CP0_EntryHi); 901*fcf5ef2aSThomas Huth if (err < 0) { 902*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err); 903*fcf5ef2aSThomas Huth ret = err; 904*fcf5ef2aSThomas Huth } 905*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE, 906*fcf5ef2aSThomas Huth &env->CP0_Compare); 907*fcf5ef2aSThomas Huth if (err < 0) { 908*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err); 909*fcf5ef2aSThomas Huth ret = err; 910*fcf5ef2aSThomas Huth } 911*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status); 912*fcf5ef2aSThomas Huth if (err < 0) { 913*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err); 914*fcf5ef2aSThomas Huth ret = err; 915*fcf5ef2aSThomas Huth } 916*fcf5ef2aSThomas Huth 917*fcf5ef2aSThomas Huth /* If VM clock stopped then state was already saved when it was stopped */ 918*fcf5ef2aSThomas Huth if (runstate_is_running()) { 919*fcf5ef2aSThomas Huth err = kvm_mips_save_count(cs); 920*fcf5ef2aSThomas Huth if (err < 0) { 921*fcf5ef2aSThomas Huth ret = err; 922*fcf5ef2aSThomas Huth } 923*fcf5ef2aSThomas Huth } 924*fcf5ef2aSThomas Huth 925*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC); 926*fcf5ef2aSThomas Huth if (err < 0) { 927*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err); 928*fcf5ef2aSThomas Huth ret = err; 929*fcf5ef2aSThomas Huth } 930*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid); 931*fcf5ef2aSThomas Huth if (err < 0) { 932*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err); 933*fcf5ef2aSThomas Huth ret = err; 934*fcf5ef2aSThomas Huth } 935*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0); 936*fcf5ef2aSThomas Huth if (err < 0) { 937*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err); 938*fcf5ef2aSThomas Huth ret = err; 939*fcf5ef2aSThomas Huth } 940*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1); 941*fcf5ef2aSThomas Huth if (err < 0) { 942*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err); 943*fcf5ef2aSThomas Huth ret = err; 944*fcf5ef2aSThomas Huth } 945*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2); 946*fcf5ef2aSThomas Huth if (err < 0) { 947*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err); 948*fcf5ef2aSThomas Huth ret = err; 949*fcf5ef2aSThomas Huth } 950*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3); 951*fcf5ef2aSThomas Huth if (err < 0) { 952*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err); 953*fcf5ef2aSThomas Huth ret = err; 954*fcf5ef2aSThomas Huth } 955*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4); 956*fcf5ef2aSThomas Huth if (err < 0) { 957*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err); 958*fcf5ef2aSThomas Huth ret = err; 959*fcf5ef2aSThomas Huth } 960*fcf5ef2aSThomas Huth err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5); 961*fcf5ef2aSThomas Huth if (err < 0) { 962*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err); 963*fcf5ef2aSThomas Huth ret = err; 964*fcf5ef2aSThomas Huth } 965*fcf5ef2aSThomas Huth err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC, 966*fcf5ef2aSThomas Huth &env->CP0_ErrorEPC); 967*fcf5ef2aSThomas Huth if (err < 0) { 968*fcf5ef2aSThomas Huth DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err); 969*fcf5ef2aSThomas Huth ret = err; 970*fcf5ef2aSThomas Huth } 971*fcf5ef2aSThomas Huth 972*fcf5ef2aSThomas Huth return ret; 973*fcf5ef2aSThomas Huth } 974*fcf5ef2aSThomas Huth 975*fcf5ef2aSThomas Huth int kvm_arch_put_registers(CPUState *cs, int level) 976*fcf5ef2aSThomas Huth { 977*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 978*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 979*fcf5ef2aSThomas Huth struct kvm_regs regs; 980*fcf5ef2aSThomas Huth int ret; 981*fcf5ef2aSThomas Huth int i; 982*fcf5ef2aSThomas Huth 983*fcf5ef2aSThomas Huth /* Set the registers based on QEMU's view of things */ 984*fcf5ef2aSThomas Huth for (i = 0; i < 32; i++) { 985*fcf5ef2aSThomas Huth regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i]; 986*fcf5ef2aSThomas Huth } 987*fcf5ef2aSThomas Huth 988*fcf5ef2aSThomas Huth regs.hi = (int64_t)(target_long)env->active_tc.HI[0]; 989*fcf5ef2aSThomas Huth regs.lo = (int64_t)(target_long)env->active_tc.LO[0]; 990*fcf5ef2aSThomas Huth regs.pc = (int64_t)(target_long)env->active_tc.PC; 991*fcf5ef2aSThomas Huth 992*fcf5ef2aSThomas Huth ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s); 993*fcf5ef2aSThomas Huth 994*fcf5ef2aSThomas Huth if (ret < 0) { 995*fcf5ef2aSThomas Huth return ret; 996*fcf5ef2aSThomas Huth } 997*fcf5ef2aSThomas Huth 998*fcf5ef2aSThomas Huth ret = kvm_mips_put_cp0_registers(cs, level); 999*fcf5ef2aSThomas Huth if (ret < 0) { 1000*fcf5ef2aSThomas Huth return ret; 1001*fcf5ef2aSThomas Huth } 1002*fcf5ef2aSThomas Huth 1003*fcf5ef2aSThomas Huth ret = kvm_mips_put_fpu_registers(cs, level); 1004*fcf5ef2aSThomas Huth if (ret < 0) { 1005*fcf5ef2aSThomas Huth return ret; 1006*fcf5ef2aSThomas Huth } 1007*fcf5ef2aSThomas Huth 1008*fcf5ef2aSThomas Huth return ret; 1009*fcf5ef2aSThomas Huth } 1010*fcf5ef2aSThomas Huth 1011*fcf5ef2aSThomas Huth int kvm_arch_get_registers(CPUState *cs) 1012*fcf5ef2aSThomas Huth { 1013*fcf5ef2aSThomas Huth MIPSCPU *cpu = MIPS_CPU(cs); 1014*fcf5ef2aSThomas Huth CPUMIPSState *env = &cpu->env; 1015*fcf5ef2aSThomas Huth int ret = 0; 1016*fcf5ef2aSThomas Huth struct kvm_regs regs; 1017*fcf5ef2aSThomas Huth int i; 1018*fcf5ef2aSThomas Huth 1019*fcf5ef2aSThomas Huth /* Get the current register set as KVM seems it */ 1020*fcf5ef2aSThomas Huth ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s); 1021*fcf5ef2aSThomas Huth 1022*fcf5ef2aSThomas Huth if (ret < 0) { 1023*fcf5ef2aSThomas Huth return ret; 1024*fcf5ef2aSThomas Huth } 1025*fcf5ef2aSThomas Huth 1026*fcf5ef2aSThomas Huth for (i = 0; i < 32; i++) { 1027*fcf5ef2aSThomas Huth env->active_tc.gpr[i] = regs.gpr[i]; 1028*fcf5ef2aSThomas Huth } 1029*fcf5ef2aSThomas Huth 1030*fcf5ef2aSThomas Huth env->active_tc.HI[0] = regs.hi; 1031*fcf5ef2aSThomas Huth env->active_tc.LO[0] = regs.lo; 1032*fcf5ef2aSThomas Huth env->active_tc.PC = regs.pc; 1033*fcf5ef2aSThomas Huth 1034*fcf5ef2aSThomas Huth kvm_mips_get_cp0_registers(cs); 1035*fcf5ef2aSThomas Huth kvm_mips_get_fpu_registers(cs); 1036*fcf5ef2aSThomas Huth 1037*fcf5ef2aSThomas Huth return ret; 1038*fcf5ef2aSThomas Huth } 1039*fcf5ef2aSThomas Huth 1040*fcf5ef2aSThomas Huth int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, 1041*fcf5ef2aSThomas Huth uint64_t address, uint32_t data, PCIDevice *dev) 1042*fcf5ef2aSThomas Huth { 1043*fcf5ef2aSThomas Huth return 0; 1044*fcf5ef2aSThomas Huth } 1045*fcf5ef2aSThomas Huth 1046*fcf5ef2aSThomas Huth int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, 1047*fcf5ef2aSThomas Huth int vector, PCIDevice *dev) 1048*fcf5ef2aSThomas Huth { 1049*fcf5ef2aSThomas Huth return 0; 1050*fcf5ef2aSThomas Huth } 1051*fcf5ef2aSThomas Huth 1052*fcf5ef2aSThomas Huth int kvm_arch_release_virq_post(int virq) 1053*fcf5ef2aSThomas Huth { 1054*fcf5ef2aSThomas Huth return 0; 1055*fcf5ef2aSThomas Huth } 1056*fcf5ef2aSThomas Huth 1057*fcf5ef2aSThomas Huth int kvm_arch_msi_data_to_gsi(uint32_t data) 1058*fcf5ef2aSThomas Huth { 1059*fcf5ef2aSThomas Huth abort(); 1060*fcf5ef2aSThomas Huth } 1061