1d7d5b05fSDeng-Cheng Zhu /*
2d7d5b05fSDeng-Cheng Zhu * This file is subject to the terms and conditions of the GNU General Public
3d7d5b05fSDeng-Cheng Zhu * License. See the file "COPYING" in the main directory of this archive
4d7d5b05fSDeng-Cheng Zhu * for more details.
5d7d5b05fSDeng-Cheng Zhu *
6d7d5b05fSDeng-Cheng Zhu * KVM/MIPS: Instruction/Exception emulation
7d7d5b05fSDeng-Cheng Zhu *
8d7d5b05fSDeng-Cheng Zhu * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9d7d5b05fSDeng-Cheng Zhu * Authors: Sanjay Lal <sanjayl@kymasys.com>
10d7d5b05fSDeng-Cheng Zhu */
11d7d5b05fSDeng-Cheng Zhu
12d7d5b05fSDeng-Cheng Zhu #include <linux/errno.h>
13d7d5b05fSDeng-Cheng Zhu #include <linux/err.h>
14d7d5b05fSDeng-Cheng Zhu #include <linux/ktime.h>
15d7d5b05fSDeng-Cheng Zhu #include <linux/kvm_host.h>
16d7d5b05fSDeng-Cheng Zhu #include <linux/vmalloc.h>
17d7d5b05fSDeng-Cheng Zhu #include <linux/fs.h>
1857c8a661SMike Rapoport #include <linux/memblock.h>
19d7d5b05fSDeng-Cheng Zhu #include <linux/random.h>
20d7d5b05fSDeng-Cheng Zhu #include <asm/page.h>
21d7d5b05fSDeng-Cheng Zhu #include <asm/cacheflush.h>
22f4956f62SJames Hogan #include <asm/cacheops.h>
23d7d5b05fSDeng-Cheng Zhu #include <asm/cpu-info.h>
24d7d5b05fSDeng-Cheng Zhu #include <asm/mmu_context.h>
25d7d5b05fSDeng-Cheng Zhu #include <asm/tlbflush.h>
26d7d5b05fSDeng-Cheng Zhu #include <asm/inst.h>
27d7d5b05fSDeng-Cheng Zhu
28d7d5b05fSDeng-Cheng Zhu #undef CONFIG_MIPS_MT
29d7d5b05fSDeng-Cheng Zhu #include <asm/r4kcache.h>
30d7d5b05fSDeng-Cheng Zhu #define CONFIG_MIPS_MT
31d7d5b05fSDeng-Cheng Zhu
32d7d5b05fSDeng-Cheng Zhu #include "interrupt.h"
33d7d5b05fSDeng-Cheng Zhu
34d7d5b05fSDeng-Cheng Zhu #include "trace.h"
35d7d5b05fSDeng-Cheng Zhu
/*
 * kvm_compute_return_epc() - Compute the PC to resume at after a branch.
 * @vcpu:	Virtual CPU.
 * @instpc:	Address of the branch instruction (the instruction which
 *		faulted sits in its delay slot at @instpc + 4).
 * @out:	Output pointer for the computed return PC.
 *
 * Emulate the branch at @instpc in order to work out where the guest will
 * resume execution after an exception in the branch's delay slot. This
 * function should be called only when the branch delay (BD) bit is set.
 *
 * Returns:	0 on success, with *@out set to the next PC.
 *		-EINVAL for an unaligned PC or an unrecognised instruction.
 *		Otherwise an error from kvm_get_badinstrp().
 */
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
				  unsigned long *out)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc;
	int err;

	if (epc & 3) {
		kvm_err("%s: unaligned epc\n", __func__);
		return -EINVAL;
	}

	/*
	 * Read the branch instruction, using the saved BadInstrP register
	 * value in preference to a guest memory access.
	 */
	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
	if (err)
		return err;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			/* Link register <- address after the delay slot */
			arch->gprs[insn.r_format.rd] = epc + 8;
			fallthrough;
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		default:
			return -EINVAL;
		}
		break;

	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;	/* not taken: skip delay slot */
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			/* $ra is written whether or not the branch is taken */
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp) {
				kvm_err("%s: DSP branch but not DSP ASE\n",
					__func__);
				return -EINVAL;
			}

			/* BPOSGE32: branch when DSPControl.pos >= 32 */
			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		default:
			return -EINVAL;
		}
		break;

	/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
		fallthrough;
	case j_op:
		/* Target lies in the same 256MB region as the delay slot */
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

	/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		return -EINVAL;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		return -EINVAL;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
	/* Fall through - Compact branches not supported before R6 */
#endif
	default:
		return -EINVAL;
	}

	*out = nextpc;
	return 0;
}
242d7d5b05fSDeng-Cheng Zhu
/*
 * update_pc() - Advance the guest PC past an emulated instruction.
 * @vcpu:	Virtual CPU.
 * @cause:	CP0_Cause value, consulted for the branch delay (BD) bit.
 *
 * When the faulting instruction sat in a branch delay slot, emulate the
 * preceding branch to find the resume address; otherwise simply step over
 * the instruction.
 *
 * Returns:	EMULATE_DONE on success.
 *		EMULATE_FAIL if the branch could not be emulated.
 */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	if (cause & CAUSEF_BD) {
		if (kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					   &vcpu->arch.pc))
			return EMULATE_FAIL;
	} else {
		vcpu->arch.pc += 4;
	}

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return EMULATE_DONE;
}
260d7d5b05fSDeng-Cheng Zhu
261d7d5b05fSDeng-Cheng Zhu /**
2626a97c775SJames Hogan * kvm_get_badinstr() - Get bad instruction encoding.
2636a97c775SJames Hogan * @opc: Guest pointer to faulting instruction.
2646a97c775SJames Hogan * @vcpu: KVM VCPU information.
2656a97c775SJames Hogan *
 * Gets the instruction encoding of the faulting instruction from the saved
 * BadInstr register value. If the CPU lacks a BadInstr register this warns
 * once and fails rather than reading guest memory at @opc.
 *
 * Returns: 0 on success, with the encoding written to @out.
 *          -EINVAL if the CPU does not have a BadInstr register.
2716a97c775SJames Hogan */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	/* Without a BadInstr register there is nothing reliable to report */
	if (!cpu_has_badinstr) {
		WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
		return -EINVAL;
	}

	*out = vcpu->arch.host_cp0_badinstr;
	return 0;
}
2826a97c775SJames Hogan
2836a97c775SJames Hogan /**
2846a97c775SJames Hogan * kvm_get_badinstrp() - Get bad prior instruction encoding.
2856a97c775SJames Hogan * @opc: Guest pointer to prior faulting instruction.
2866a97c775SJames Hogan * @vcpu: KVM VCPU information.
2876a97c775SJames Hogan *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * containing the delay slot which faulted) from the saved BadInstrP register
 * value. If the CPU lacks a BadInstrP register this warns once and fails
 * rather than reading guest memory at @opc.
 *
 * Returns: 0 on success, with the encoding written to @out.
 *          -EINVAL if the CPU does not have a BadInstrP register.
2936a97c775SJames Hogan */
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	/* Without a BadInstrP register there is nothing reliable to report */
	if (!cpu_has_badinstrp) {
		WARN_ONCE(1, "CPU doesn't have BadInstrp register\n");
		return -EINVAL;
	}

	*out = vcpu->arch.host_cp0_badinstrp;
	return 0;
}
3046a97c775SJames Hogan
3056a97c775SJames Hogan /**
306d7d5b05fSDeng-Cheng Zhu * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
307d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
308d7d5b05fSDeng-Cheng Zhu *
309d7d5b05fSDeng-Cheng Zhu * Returns: 1 if the CP0_Count timer is disabled by either the guest
310d7d5b05fSDeng-Cheng Zhu * CP0_Cause.DC bit or the count_ctl.DC bit.
311d7d5b05fSDeng-Cheng Zhu * 0 otherwise (in which case CP0_Count timer is running).
312d7d5b05fSDeng-Cheng Zhu */
kvm_mips_count_disabled(struct kvm_vcpu * vcpu)313f4474d50SJames Hogan int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
314d7d5b05fSDeng-Cheng Zhu {
315*e4de2057SHuacai Chen struct mips_coproc *cop0 = &vcpu->arch.cop0;
316d7d5b05fSDeng-Cheng Zhu
317d7d5b05fSDeng-Cheng Zhu return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
318d7d5b05fSDeng-Cheng Zhu (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
319d7d5b05fSDeng-Cheng Zhu }
320d7d5b05fSDeng-Cheng Zhu
321d7d5b05fSDeng-Cheng Zhu /**
322d7d5b05fSDeng-Cheng Zhu * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
323d7d5b05fSDeng-Cheng Zhu *
324d7d5b05fSDeng-Cheng Zhu * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
325d7d5b05fSDeng-Cheng Zhu *
326d7d5b05fSDeng-Cheng Zhu * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
327d7d5b05fSDeng-Cheng Zhu */
kvm_mips_ktime_to_count(struct kvm_vcpu * vcpu,ktime_t now)328bdb7ed86SJames Hogan static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
329d7d5b05fSDeng-Cheng Zhu {
330d7d5b05fSDeng-Cheng Zhu s64 now_ns, periods;
331d7d5b05fSDeng-Cheng Zhu u64 delta;
332d7d5b05fSDeng-Cheng Zhu
333d7d5b05fSDeng-Cheng Zhu now_ns = ktime_to_ns(now);
334d7d5b05fSDeng-Cheng Zhu delta = now_ns + vcpu->arch.count_dyn_bias;
335d7d5b05fSDeng-Cheng Zhu
336d7d5b05fSDeng-Cheng Zhu if (delta >= vcpu->arch.count_period) {
337d7d5b05fSDeng-Cheng Zhu /* If delta is out of safe range the bias needs adjusting */
338d7d5b05fSDeng-Cheng Zhu periods = div64_s64(now_ns, vcpu->arch.count_period);
339d7d5b05fSDeng-Cheng Zhu vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
340d7d5b05fSDeng-Cheng Zhu /* Recalculate delta with new bias */
341d7d5b05fSDeng-Cheng Zhu delta = now_ns + vcpu->arch.count_dyn_bias;
342d7d5b05fSDeng-Cheng Zhu }
343d7d5b05fSDeng-Cheng Zhu
344d7d5b05fSDeng-Cheng Zhu /*
345d7d5b05fSDeng-Cheng Zhu * We've ensured that:
346d7d5b05fSDeng-Cheng Zhu * delta < count_period
347d7d5b05fSDeng-Cheng Zhu *
348d7d5b05fSDeng-Cheng Zhu * Therefore the intermediate delta*count_hz will never overflow since
349d7d5b05fSDeng-Cheng Zhu * at the boundary condition:
350d7d5b05fSDeng-Cheng Zhu * delta = count_period
351d7d5b05fSDeng-Cheng Zhu * delta = NSEC_PER_SEC * 2^32 / count_hz
352d7d5b05fSDeng-Cheng Zhu * delta * count_hz = NSEC_PER_SEC * 2^32
353d7d5b05fSDeng-Cheng Zhu */
354d7d5b05fSDeng-Cheng Zhu return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
355d7d5b05fSDeng-Cheng Zhu }
356d7d5b05fSDeng-Cheng Zhu
357d7d5b05fSDeng-Cheng Zhu /**
358d7d5b05fSDeng-Cheng Zhu * kvm_mips_count_time() - Get effective current time.
359d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
360d7d5b05fSDeng-Cheng Zhu *
361d7d5b05fSDeng-Cheng Zhu * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
362d7d5b05fSDeng-Cheng Zhu * except when the master disable bit is set in count_ctl, in which case it is
363d7d5b05fSDeng-Cheng Zhu * count_resume, i.e. the time that the count was disabled.
364d7d5b05fSDeng-Cheng Zhu *
365d7d5b05fSDeng-Cheng Zhu * Returns: Effective monotonic ktime for CP0_Count.
366d7d5b05fSDeng-Cheng Zhu */
kvm_mips_count_time(struct kvm_vcpu * vcpu)367d7d5b05fSDeng-Cheng Zhu static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
368d7d5b05fSDeng-Cheng Zhu {
369d7d5b05fSDeng-Cheng Zhu if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
370d7d5b05fSDeng-Cheng Zhu return vcpu->arch.count_resume;
371d7d5b05fSDeng-Cheng Zhu
372d7d5b05fSDeng-Cheng Zhu return ktime_get();
373d7d5b05fSDeng-Cheng Zhu }
374d7d5b05fSDeng-Cheng Zhu
375d7d5b05fSDeng-Cheng Zhu /**
376d7d5b05fSDeng-Cheng Zhu * kvm_mips_read_count_running() - Read the current count value as if running.
377d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
378d7d5b05fSDeng-Cheng Zhu * @now: Kernel time to read CP0_Count at.
379d7d5b05fSDeng-Cheng Zhu *
380d7d5b05fSDeng-Cheng Zhu * Returns the current guest CP0_Count register at time @now and handles if the
381d7d5b05fSDeng-Cheng Zhu * timer interrupt is pending and hasn't been handled yet.
382d7d5b05fSDeng-Cheng Zhu *
383d7d5b05fSDeng-Cheng Zhu * Returns: The current value of the guest CP0_Count register.
384d7d5b05fSDeng-Cheng Zhu */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it. (Signed subtraction is wrap-safe.)
	 */
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}
435d7d5b05fSDeng-Cheng Zhu
436d7d5b05fSDeng-Cheng Zhu /**
437d7d5b05fSDeng-Cheng Zhu * kvm_mips_read_count() - Read the current count value.
438d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
439d7d5b05fSDeng-Cheng Zhu *
440d7d5b05fSDeng-Cheng Zhu * Read the current guest CP0_Count value, taking into account whether the timer
441d7d5b05fSDeng-Cheng Zhu * is stopped.
442d7d5b05fSDeng-Cheng Zhu *
443d7d5b05fSDeng-Cheng Zhu * Returns: The current guest CP0_Count value.
444d7d5b05fSDeng-Cheng Zhu */
kvm_mips_read_count(struct kvm_vcpu * vcpu)445bdb7ed86SJames Hogan u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
446d7d5b05fSDeng-Cheng Zhu {
447*e4de2057SHuacai Chen struct mips_coproc *cop0 = &vcpu->arch.cop0;
448d7d5b05fSDeng-Cheng Zhu
449d7d5b05fSDeng-Cheng Zhu /* If count disabled just read static copy of count */
450d7d5b05fSDeng-Cheng Zhu if (kvm_mips_count_disabled(vcpu))
451d7d5b05fSDeng-Cheng Zhu return kvm_read_c0_guest_count(cop0);
452d7d5b05fSDeng-Cheng Zhu
453d7d5b05fSDeng-Cheng Zhu return kvm_mips_read_count_running(vcpu, ktime_get());
454d7d5b05fSDeng-Cheng Zhu }
455d7d5b05fSDeng-Cheng Zhu
456d7d5b05fSDeng-Cheng Zhu /**
457d7d5b05fSDeng-Cheng Zhu * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
458d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
459d7d5b05fSDeng-Cheng Zhu * @count: Output pointer for CP0_Count value at point of freeze.
460d7d5b05fSDeng-Cheng Zhu *
461d7d5b05fSDeng-Cheng Zhu * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
462d7d5b05fSDeng-Cheng Zhu * at the point it was frozen. It is guaranteed that any pending interrupts at
463d7d5b05fSDeng-Cheng Zhu * the point it was frozen are handled, and none after that point.
464d7d5b05fSDeng-Cheng Zhu *
465d7d5b05fSDeng-Cheng Zhu * This is useful where the time/CP0_Count is needed in the calculation of the
466d7d5b05fSDeng-Cheng Zhu * new parameters.
467d7d5b05fSDeng-Cheng Zhu *
468d7d5b05fSDeng-Cheng Zhu * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
469d7d5b05fSDeng-Cheng Zhu *
470d7d5b05fSDeng-Cheng Zhu * Returns: The ktime at the point of freeze.
471d7d5b05fSDeng-Cheng Zhu */
kvm_mips_freeze_hrtimer(struct kvm_vcpu * vcpu,u32 * count)472f4474d50SJames Hogan ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
473d7d5b05fSDeng-Cheng Zhu {
474d7d5b05fSDeng-Cheng Zhu ktime_t now;
475d7d5b05fSDeng-Cheng Zhu
476d7d5b05fSDeng-Cheng Zhu /* stop hrtimer before finding time */
477d7d5b05fSDeng-Cheng Zhu hrtimer_cancel(&vcpu->arch.comparecount_timer);
478d7d5b05fSDeng-Cheng Zhu now = ktime_get();
479d7d5b05fSDeng-Cheng Zhu
480d7d5b05fSDeng-Cheng Zhu /* find count at this point and handle pending hrtimer */
481d7d5b05fSDeng-Cheng Zhu *count = kvm_mips_read_count_running(vcpu, now);
482d7d5b05fSDeng-Cheng Zhu
483d7d5b05fSDeng-Cheng Zhu return now;
484d7d5b05fSDeng-Cheng Zhu }
485d7d5b05fSDeng-Cheng Zhu
486d7d5b05fSDeng-Cheng Zhu /**
487d7d5b05fSDeng-Cheng Zhu * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
488d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
489d7d5b05fSDeng-Cheng Zhu * @now: ktime at point of resume.
490d7d5b05fSDeng-Cheng Zhu * @count: CP0_Count at point of resume.
491d7d5b05fSDeng-Cheng Zhu *
492d7d5b05fSDeng-Cheng Zhu * Resumes the timer and updates the timer expiry based on @now and @count.
493d7d5b05fSDeng-Cheng Zhu * This can be used in conjunction with kvm_mips_freeze_timer() when timer
494d7d5b05fSDeng-Cheng Zhu * parameters need to be changed.
495d7d5b05fSDeng-Cheng Zhu *
496d7d5b05fSDeng-Cheng Zhu * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
498d7d5b05fSDeng-Cheng Zhu * handled by kvm_mips_freeze_timer().
499d7d5b05fSDeng-Cheng Zhu *
500d7d5b05fSDeng-Cheng Zhu * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
501d7d5b05fSDeng-Cheng Zhu */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	u32 remaining;
	u64 delta_ns;
	ktime_t expire;

	/*
	 * Ticks until CP0_Compare is hit; a difference of 0 wraps to a full
	 * 2^32 ticks.
	 */
	remaining = kvm_read_c0_guest_compare(&vcpu->arch.cop0) - count;
	delta_ns = (u64)(u32)(remaining - 1) + 1;
	delta_ns = div_u64(delta_ns * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta_ns);

	/* Replace any pending expiry with the freshly computed one */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
520d7d5b05fSDeng-Cheng Zhu
521d7d5b05fSDeng-Cheng Zhu /**
522f4474d50SJames Hogan * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
523f4474d50SJames Hogan * @vcpu: Virtual CPU.
524f4474d50SJames Hogan * @before: Time before Count was saved, lower bound of drift calculation.
525f4474d50SJames Hogan * @count: CP0_Count at point of restore.
526f4474d50SJames Hogan * @min_drift: Minimum amount of drift permitted before correction.
527f4474d50SJames Hogan * Must be <= 0.
528f4474d50SJames Hogan *
529f4474d50SJames Hogan * Restores the timer from a particular @count, accounting for drift. This can
530f4474d50SJames Hogan * be used in conjunction with kvm_mips_freeze_timer() when a hardware timer is
531f4474d50SJames Hogan * to be used for a period of time, but the exact ktime corresponding to the
532f4474d50SJames Hogan * final Count that must be restored is not known.
533f4474d50SJames Hogan *
 * It is guaranteed that a timer interrupt immediately after restore will be
535f4474d50SJames Hogan * handled, but not if CP0_Compare is exactly at @count. That case should
536f4474d50SJames Hogan * already be handled when the hardware timer state is saved.
537f4474d50SJames Hogan *
538f4474d50SJames Hogan * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
539f4474d50SJames Hogan * stopped).
540f4474d50SJames Hogan *
541f4474d50SJames Hogan * Returns: Amount of correction to count_bias due to drift.
542f4474d50SJames Hogan */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift)
{
	ktime_t now, count_time;
	u32 now_count, before_count;
	u64 delta;
	int drift, ret = 0;

	/* Calculate expected count at before */
	before_count = vcpu->arch.count_bias +
			kvm_mips_ktime_to_count(vcpu, before);

	/*
	 * Detect significantly negative drift, where count is lower than
	 * expected. Some negative drift is expected when hardware counter is
	 * set after kvm_mips_freeze_timer(), and it is harmless to allow the
	 * time to jump forwards a little, within reason. If the drift is too
	 * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
	 */
	drift = count - before_count;
	if (drift < min_drift) {
		/* Rebase so Guest.CP0_Count reads @count at @before */
		count_time = before;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Calculate expected count right now */
	now = ktime_get();
	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

	/*
	 * Detect positive drift, where count is higher than expected, and
	 * adjust the bias to avoid guest time going backwards.
	 */
	drift = count - now_count;
	if (drift > 0) {
		count_time = now;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Subtract nanosecond delta to find ktime when count was read */
	delta = (u64)(u32)(now_count - count);
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	count_time = ktime_sub_ns(now, delta);

resume:
	/* Resume using the calculated ktime */
	kvm_mips_resume_hrtimer(vcpu, count_time, count);
	return ret;
}
596f4474d50SJames Hogan
597f4474d50SJames Hogan /**
598d7d5b05fSDeng-Cheng Zhu * kvm_mips_write_count() - Modify the count and update timer.
599d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
600d7d5b05fSDeng-Cheng Zhu * @count: Guest CP0_Count value to set.
601d7d5b05fSDeng-Cheng Zhu *
602d7d5b05fSDeng-Cheng Zhu * Sets the CP0_Count value and updates the timer accordingly.
603d7d5b05fSDeng-Cheng Zhu */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	ktime_t now = kvm_mips_count_time(vcpu);

	/* Re-derive the bias so the scaled clock reads @count at @now */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu)) {
		/* Timer stopped: just update the frozen snapshot */
		kvm_write_c0_guest_count(cop0, count);
	} else {
		/* Timer running: rearm the expiry for the new count */
		kvm_mips_resume_hrtimer(vcpu, now, count);
	}
}
620d7d5b05fSDeng-Cheng Zhu
621d7d5b05fSDeng-Cheng Zhu /**
622d7d5b05fSDeng-Cheng Zhu * kvm_mips_init_count() - Initialise timer.
623d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
624a517c1adSJames Hogan * @count_hz: Frequency of timer.
625d7d5b05fSDeng-Cheng Zhu *
626a517c1adSJames Hogan * Initialise the timer to the specified frequency, zero it, and set it going if
627a517c1adSJames Hogan * it's enabled.
628d7d5b05fSDeng-Cheng Zhu */
kvm_mips_init_count(struct kvm_vcpu * vcpu,unsigned long count_hz)629a517c1adSJames Hogan void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
630d7d5b05fSDeng-Cheng Zhu {
631a517c1adSJames Hogan vcpu->arch.count_hz = count_hz;
632a517c1adSJames Hogan vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
633d7d5b05fSDeng-Cheng Zhu vcpu->arch.count_dyn_bias = 0;
634d7d5b05fSDeng-Cheng Zhu
635d7d5b05fSDeng-Cheng Zhu /* Starting at 0 */
636d7d5b05fSDeng-Cheng Zhu kvm_mips_write_count(vcpu, 0);
637d7d5b05fSDeng-Cheng Zhu }
638d7d5b05fSDeng-Cheng Zhu
639d7d5b05fSDeng-Cheng Zhu /**
640d7d5b05fSDeng-Cheng Zhu * kvm_mips_set_count_hz() - Update the frequency of the timer.
641d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
642d7d5b05fSDeng-Cheng Zhu * @count_hz: Frequency of CP0_Count timer in Hz.
643d7d5b05fSDeng-Cheng Zhu *
644d7d5b05fSDeng-Cheng Zhu * Change the frequency of the CP0_Count timer. This is done atomically so that
645d7d5b05fSDeng-Cheng Zhu * CP0_Count is continuous and no timer interrupt is lost.
646d7d5b05fSDeng-Cheng Zhu *
647d7d5b05fSDeng-Cheng Zhu * Returns: -EINVAL if @count_hz is out of range.
648d7d5b05fSDeng-Cheng Zhu * 0 on success.
649d7d5b05fSDeng-Cheng Zhu */
kvm_mips_set_count_hz(struct kvm_vcpu * vcpu,s64 count_hz)650d7d5b05fSDeng-Cheng Zhu int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
651d7d5b05fSDeng-Cheng Zhu {
652*e4de2057SHuacai Chen struct mips_coproc *cop0 = &vcpu->arch.cop0;
653d7d5b05fSDeng-Cheng Zhu int dc;
654d7d5b05fSDeng-Cheng Zhu ktime_t now;
655d7d5b05fSDeng-Cheng Zhu u32 count;
656d7d5b05fSDeng-Cheng Zhu
657d7d5b05fSDeng-Cheng Zhu /* ensure the frequency is in a sensible range... */
658d7d5b05fSDeng-Cheng Zhu if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
659d7d5b05fSDeng-Cheng Zhu return -EINVAL;
660d7d5b05fSDeng-Cheng Zhu /* ... and has actually changed */
661d7d5b05fSDeng-Cheng Zhu if (vcpu->arch.count_hz == count_hz)
662d7d5b05fSDeng-Cheng Zhu return 0;
663d7d5b05fSDeng-Cheng Zhu
664d7d5b05fSDeng-Cheng Zhu /* Safely freeze timer so we can keep it continuous */
665d7d5b05fSDeng-Cheng Zhu dc = kvm_mips_count_disabled(vcpu);
666d7d5b05fSDeng-Cheng Zhu if (dc) {
667d7d5b05fSDeng-Cheng Zhu now = kvm_mips_count_time(vcpu);
668d7d5b05fSDeng-Cheng Zhu count = kvm_read_c0_guest_count(cop0);
669d7d5b05fSDeng-Cheng Zhu } else {
670d7d5b05fSDeng-Cheng Zhu now = kvm_mips_freeze_hrtimer(vcpu, &count);
671d7d5b05fSDeng-Cheng Zhu }
672d7d5b05fSDeng-Cheng Zhu
673d7d5b05fSDeng-Cheng Zhu /* Update the frequency */
674d7d5b05fSDeng-Cheng Zhu vcpu->arch.count_hz = count_hz;
675d7d5b05fSDeng-Cheng Zhu vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
676d7d5b05fSDeng-Cheng Zhu vcpu->arch.count_dyn_bias = 0;
677d7d5b05fSDeng-Cheng Zhu
678d7d5b05fSDeng-Cheng Zhu /* Calculate adjusted bias so dynamic count is unchanged */
679d7d5b05fSDeng-Cheng Zhu vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
680d7d5b05fSDeng-Cheng Zhu
681d7d5b05fSDeng-Cheng Zhu /* Update and resume hrtimer */
682d7d5b05fSDeng-Cheng Zhu if (!dc)
683d7d5b05fSDeng-Cheng Zhu kvm_mips_resume_hrtimer(vcpu, now, count);
684d7d5b05fSDeng-Cheng Zhu return 0;
685d7d5b05fSDeng-Cheng Zhu }
686d7d5b05fSDeng-Cheng Zhu
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 *
 * The hrtimer freeze/resume pair brackets the update so that any timer
 * interrupt implied by the old or new Compare value is neither lost nor
 * spuriously raised.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	/* Signed delta: positive means Compare is moving forward */
	s32 delta = compare - old_compare;
	u32 cause;
	ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/*
	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
	 *
	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
	 * set prior to it being written into the guest context. We disable
	 * preemption until the new value is written to prevent restore of a
	 * GTOffset corresponding to the old CP0_Compare value.
	 */
	if (delta > 0) {
		preempt_disable();
		write_c0_gtoffset(compare - read_c0_count());
		back_to_back_c0_hazard();
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
	else
		/*
		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
		 * preserve guest CP0_Cause.TI if we don't want to ack it.
		 */
		cause = kvm_read_c0_guest_cause(cop0);

	kvm_write_c0_guest_compare(cop0, compare);

	/* Compare is written; safe to allow preemption again (see above) */
	if (delta > 0)
		preempt_enable();

	back_to_back_c0_hazard();

	/* Restore a TI that the Compare write may have cleared (VZ ack) */
	if (!ack && cause & CAUSEF_TI)
		kvm_write_c0_guest_cause(cop0, cause);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);

	/*
	 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
	 * until after the new CP0_Compare is written, otherwise new guest
	 * CP0_Count could hit new guest CP0_Compare.
	 */
	if (delta <= 0)
		write_c0_gtoffset(compare - read_c0_count());
}
767d7d5b05fSDeng-Cheng Zhu
768d7d5b05fSDeng-Cheng Zhu /**
769d7d5b05fSDeng-Cheng Zhu * kvm_mips_count_disable() - Disable count.
770d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
771d7d5b05fSDeng-Cheng Zhu *
772d7d5b05fSDeng-Cheng Zhu * Disable the CP0_Count timer. A timer interrupt on or before the final stop
773d7d5b05fSDeng-Cheng Zhu * time will be handled but not after.
774d7d5b05fSDeng-Cheng Zhu *
775d7d5b05fSDeng-Cheng Zhu * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
776d7d5b05fSDeng-Cheng Zhu * count_ctl.DC has been set (count disabled).
777d7d5b05fSDeng-Cheng Zhu *
778d7d5b05fSDeng-Cheng Zhu * Returns: The time that the timer was stopped.
779d7d5b05fSDeng-Cheng Zhu */
kvm_mips_count_disable(struct kvm_vcpu * vcpu)780d7d5b05fSDeng-Cheng Zhu static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
781d7d5b05fSDeng-Cheng Zhu {
782*e4de2057SHuacai Chen struct mips_coproc *cop0 = &vcpu->arch.cop0;
7838cffd197SJames Hogan u32 count;
784d7d5b05fSDeng-Cheng Zhu ktime_t now;
785d7d5b05fSDeng-Cheng Zhu
786d7d5b05fSDeng-Cheng Zhu /* Stop hrtimer */
787d7d5b05fSDeng-Cheng Zhu hrtimer_cancel(&vcpu->arch.comparecount_timer);
788d7d5b05fSDeng-Cheng Zhu
789d7d5b05fSDeng-Cheng Zhu /* Set the static count from the dynamic count, handling pending TI */
790d7d5b05fSDeng-Cheng Zhu now = ktime_get();
791d7d5b05fSDeng-Cheng Zhu count = kvm_mips_read_count_running(vcpu, now);
792d7d5b05fSDeng-Cheng Zhu kvm_write_c0_guest_count(cop0, count);
793d7d5b05fSDeng-Cheng Zhu
794d7d5b05fSDeng-Cheng Zhu return now;
795d7d5b05fSDeng-Cheng Zhu }
796d7d5b05fSDeng-Cheng Zhu
797d7d5b05fSDeng-Cheng Zhu /**
798d7d5b05fSDeng-Cheng Zhu * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
799d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
800d7d5b05fSDeng-Cheng Zhu *
801d7d5b05fSDeng-Cheng Zhu * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
802d7d5b05fSDeng-Cheng Zhu * before the final stop time will be handled if the timer isn't disabled by
803d7d5b05fSDeng-Cheng Zhu * count_ctl.DC, but not after.
804d7d5b05fSDeng-Cheng Zhu *
805d7d5b05fSDeng-Cheng Zhu * Assumes CP0_Cause.DC is clear (count enabled).
806d7d5b05fSDeng-Cheng Zhu */
kvm_mips_count_disable_cause(struct kvm_vcpu * vcpu)807d7d5b05fSDeng-Cheng Zhu void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
808d7d5b05fSDeng-Cheng Zhu {
809*e4de2057SHuacai Chen struct mips_coproc *cop0 = &vcpu->arch.cop0;
810d7d5b05fSDeng-Cheng Zhu
811d7d5b05fSDeng-Cheng Zhu kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
812d7d5b05fSDeng-Cheng Zhu if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
813d7d5b05fSDeng-Cheng Zhu kvm_mips_count_disable(vcpu);
814d7d5b05fSDeng-Cheng Zhu }
815d7d5b05fSDeng-Cheng Zhu
816d7d5b05fSDeng-Cheng Zhu /**
817d7d5b05fSDeng-Cheng Zhu * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
818d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
819d7d5b05fSDeng-Cheng Zhu *
820d7d5b05fSDeng-Cheng Zhu * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
821d7d5b05fSDeng-Cheng Zhu * the start time will be handled if the timer isn't disabled by count_ctl.DC,
822d7d5b05fSDeng-Cheng Zhu * potentially before even returning, so the caller should be careful with
823d7d5b05fSDeng-Cheng Zhu * ordering of CP0_Cause modifications so as not to lose it.
824d7d5b05fSDeng-Cheng Zhu *
825d7d5b05fSDeng-Cheng Zhu * Assumes CP0_Cause.DC is set (count disabled).
826d7d5b05fSDeng-Cheng Zhu */
kvm_mips_count_enable_cause(struct kvm_vcpu * vcpu)827d7d5b05fSDeng-Cheng Zhu void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
828d7d5b05fSDeng-Cheng Zhu {
829*e4de2057SHuacai Chen struct mips_coproc *cop0 = &vcpu->arch.cop0;
8308cffd197SJames Hogan u32 count;
831d7d5b05fSDeng-Cheng Zhu
832d7d5b05fSDeng-Cheng Zhu kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
833d7d5b05fSDeng-Cheng Zhu
834d7d5b05fSDeng-Cheng Zhu /*
835d7d5b05fSDeng-Cheng Zhu * Set the dynamic count to match the static count.
836d7d5b05fSDeng-Cheng Zhu * This starts the hrtimer if count_ctl.DC allows it.
837d7d5b05fSDeng-Cheng Zhu * Otherwise it conveniently updates the biases.
838d7d5b05fSDeng-Cheng Zhu */
839d7d5b05fSDeng-Cheng Zhu count = kvm_read_c0_guest_count(cop0);
840d7d5b05fSDeng-Cheng Zhu kvm_mips_write_count(vcpu, count);
841d7d5b05fSDeng-Cheng Zhu }
842d7d5b05fSDeng-Cheng Zhu
/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Only the KVM_REG_MIPS_COUNT_CTL_DC bit is defined; toggling it stops or
 * restarts the timer unless CP0_Cause.DC keeps the count disabled anyway.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	/* XOR of old and new value: set bits are the ones being toggled */
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Re-enabling: count_ctl.DC is being cleared while
			 * CP0_Cause.DC is clear, so the timer must restart.
			 *
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}
904d7d5b05fSDeng-Cheng Zhu
905d7d5b05fSDeng-Cheng Zhu /**
906d7d5b05fSDeng-Cheng Zhu * kvm_mips_set_count_resume() - Update the count resume KVM register.
907d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
908d7d5b05fSDeng-Cheng Zhu * @count_resume: Count resume register new value.
909d7d5b05fSDeng-Cheng Zhu *
910d7d5b05fSDeng-Cheng Zhu * Set the count resume KVM register.
911d7d5b05fSDeng-Cheng Zhu *
912d7d5b05fSDeng-Cheng Zhu * Returns: -EINVAL if out of valid range (0..now).
913d7d5b05fSDeng-Cheng Zhu * 0 on success.
914d7d5b05fSDeng-Cheng Zhu */
kvm_mips_set_count_resume(struct kvm_vcpu * vcpu,s64 count_resume)915d7d5b05fSDeng-Cheng Zhu int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
916d7d5b05fSDeng-Cheng Zhu {
917d7d5b05fSDeng-Cheng Zhu /*
918d7d5b05fSDeng-Cheng Zhu * It doesn't make sense for the resume time to be in the future, as it
919d7d5b05fSDeng-Cheng Zhu * would be possible for the next interrupt to be more than a full
920d7d5b05fSDeng-Cheng Zhu * period in the future.
921d7d5b05fSDeng-Cheng Zhu */
922d7d5b05fSDeng-Cheng Zhu if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
923d7d5b05fSDeng-Cheng Zhu return -EINVAL;
924d7d5b05fSDeng-Cheng Zhu
925d7d5b05fSDeng-Cheng Zhu vcpu->arch.count_resume = ns_to_ktime(count_resume);
926d7d5b05fSDeng-Cheng Zhu return 0;
927d7d5b05fSDeng-Cheng Zhu }
928d7d5b05fSDeng-Cheng Zhu
929d7d5b05fSDeng-Cheng Zhu /**
930d7d5b05fSDeng-Cheng Zhu * kvm_mips_count_timeout() - Push timer forward on timeout.
931d7d5b05fSDeng-Cheng Zhu * @vcpu: Virtual CPU.
932d7d5b05fSDeng-Cheng Zhu *
933d7d5b05fSDeng-Cheng Zhu * Handle an hrtimer event by push the hrtimer forward a period.
934d7d5b05fSDeng-Cheng Zhu *
935d7d5b05fSDeng-Cheng Zhu * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
936d7d5b05fSDeng-Cheng Zhu */
kvm_mips_count_timeout(struct kvm_vcpu * vcpu)937d7d5b05fSDeng-Cheng Zhu enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
938d7d5b05fSDeng-Cheng Zhu {
939d7d5b05fSDeng-Cheng Zhu /* Add the Count period to the current expiry time */
940d7d5b05fSDeng-Cheng Zhu hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
941d7d5b05fSDeng-Cheng Zhu vcpu->arch.count_period);
942d7d5b05fSDeng-Cheng Zhu return HRTIMER_RESTART;
943d7d5b05fSDeng-Cheng Zhu }
944d7d5b05fSDeng-Cheng Zhu
kvm_mips_emul_wait(struct kvm_vcpu * vcpu)945d7d5b05fSDeng-Cheng Zhu enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
946d7d5b05fSDeng-Cheng Zhu {
947d7d5b05fSDeng-Cheng Zhu kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
948d7d5b05fSDeng-Cheng Zhu vcpu->arch.pending_exceptions);
949d7d5b05fSDeng-Cheng Zhu
950d7d5b05fSDeng-Cheng Zhu ++vcpu->stat.wait_exits;
9511e09e86aSJames Hogan trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
952d7d5b05fSDeng-Cheng Zhu if (!vcpu->arch.pending_exceptions) {
953f4474d50SJames Hogan kvm_vz_lose_htimer(vcpu);
954d7d5b05fSDeng-Cheng Zhu vcpu->arch.wait = 1;
95591b99ea7SSean Christopherson kvm_vcpu_halt(vcpu);
956d7d5b05fSDeng-Cheng Zhu
957d7d5b05fSDeng-Cheng Zhu /*
958599275c0SPaolo Bonzini * We are runnable, then definitely go off to user space to
959d7d5b05fSDeng-Cheng Zhu * check if any I/O interrupts are pending.
960d7d5b05fSDeng-Cheng Zhu */
961599275c0SPaolo Bonzini if (kvm_arch_vcpu_runnable(vcpu))
962d7d5b05fSDeng-Cheng Zhu vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
963d7d5b05fSDeng-Cheng Zhu }
964d7d5b05fSDeng-Cheng Zhu
965d7d5b05fSDeng-Cheng Zhu return EMULATE_DONE;
966d7d5b05fSDeng-Cheng Zhu }
967d7d5b05fSDeng-Cheng Zhu
kvm_mips_emulate_store(union mips_instruction inst,u32 cause,struct kvm_vcpu * vcpu)968258f3a2eSJames Hogan enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
969258f3a2eSJames Hogan u32 cause,
970d7d5b05fSDeng-Cheng Zhu struct kvm_vcpu *vcpu)
971d7d5b05fSDeng-Cheng Zhu {
972f21db309SHuacai Chen int r;
9738b48d5b7SJames Hogan enum emulation_result er;
974258f3a2eSJames Hogan u32 rt;
975c34b26b9STianjia Zhang struct kvm_run *run = vcpu->run;
976d7d5b05fSDeng-Cheng Zhu void *data = run->mmio.data;
977dc6d95b1SHuacai Chen unsigned int imme;
978d7d5b05fSDeng-Cheng Zhu unsigned long curr_pc;
979d7d5b05fSDeng-Cheng Zhu
980d7d5b05fSDeng-Cheng Zhu /*
981d7d5b05fSDeng-Cheng Zhu * Update PC and hold onto current PC in case there is
982d7d5b05fSDeng-Cheng Zhu * an error and we want to rollback the PC
983d7d5b05fSDeng-Cheng Zhu */
984d7d5b05fSDeng-Cheng Zhu curr_pc = vcpu->arch.pc;
985d7d5b05fSDeng-Cheng Zhu er = update_pc(vcpu, cause);
986d7d5b05fSDeng-Cheng Zhu if (er == EMULATE_FAIL)
987d7d5b05fSDeng-Cheng Zhu return er;
988d7d5b05fSDeng-Cheng Zhu
989258f3a2eSJames Hogan rt = inst.i_format.rt;
990d7d5b05fSDeng-Cheng Zhu
9918b48d5b7SJames Hogan run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
9928b48d5b7SJames Hogan vcpu->arch.host_cp0_badvaddr);
9938b48d5b7SJames Hogan if (run->mmio.phys_addr == KVM_INVALID_ADDR)
9948b48d5b7SJames Hogan goto out_fail;
9958b48d5b7SJames Hogan
996258f3a2eSJames Hogan switch (inst.i_format.opcode) {
99745c7e8afSThomas Bogendoerfer #if defined(CONFIG_64BIT)
99859d7814aSJames Hogan case sd_op:
99959d7814aSJames Hogan run->mmio.len = 8;
100059d7814aSJames Hogan *(u64 *)data = vcpu->arch.gprs[rt];
100159d7814aSJames Hogan
100259d7814aSJames Hogan kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
100359d7814aSJames Hogan vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
100459d7814aSJames Hogan vcpu->arch.gprs[rt], *(u64 *)data);
100559d7814aSJames Hogan break;
100659d7814aSJames Hogan #endif
100759d7814aSJames Hogan
1008d7d5b05fSDeng-Cheng Zhu case sw_op:
10098b48d5b7SJames Hogan run->mmio.len = 4;
10108cffd197SJames Hogan *(u32 *)data = vcpu->arch.gprs[rt];
1011d7d5b05fSDeng-Cheng Zhu
1012d7d5b05fSDeng-Cheng Zhu kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1013d7d5b05fSDeng-Cheng Zhu vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
10148cffd197SJames Hogan vcpu->arch.gprs[rt], *(u32 *)data);
1015d7d5b05fSDeng-Cheng Zhu break;
1016d7d5b05fSDeng-Cheng Zhu
1017d7d5b05fSDeng-Cheng Zhu case sh_op:
10188b48d5b7SJames Hogan run->mmio.len = 2;
10198cffd197SJames Hogan *(u16 *)data = vcpu->arch.gprs[rt];
1020d7d5b05fSDeng-Cheng Zhu
1021d7d5b05fSDeng-Cheng Zhu kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1022d7d5b05fSDeng-Cheng Zhu vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
10238b48d5b7SJames Hogan vcpu->arch.gprs[rt], *(u16 *)data);
10248b48d5b7SJames Hogan break;
10258b48d5b7SJames Hogan
10268b48d5b7SJames Hogan case sb_op:
10278b48d5b7SJames Hogan run->mmio.len = 1;
10288b48d5b7SJames Hogan *(u8 *)data = vcpu->arch.gprs[rt];
10298b48d5b7SJames Hogan
10308b48d5b7SJames Hogan kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
10318b48d5b7SJames Hogan vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
10328b48d5b7SJames Hogan vcpu->arch.gprs[rt], *(u8 *)data);
1033d7d5b05fSDeng-Cheng Zhu break;
1034d7d5b05fSDeng-Cheng Zhu
1035dc6d95b1SHuacai Chen case swl_op:
1036dc6d95b1SHuacai Chen run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1037dc6d95b1SHuacai Chen vcpu->arch.host_cp0_badvaddr) & (~0x3);
1038dc6d95b1SHuacai Chen run->mmio.len = 4;
1039dc6d95b1SHuacai Chen imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1040dc6d95b1SHuacai Chen switch (imme) {
1041dc6d95b1SHuacai Chen case 0:
1042dc6d95b1SHuacai Chen *(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
1043dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] >> 24);
1044dc6d95b1SHuacai Chen break;
1045dc6d95b1SHuacai Chen case 1:
1046dc6d95b1SHuacai Chen *(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
1047dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] >> 16);
1048dc6d95b1SHuacai Chen break;
1049dc6d95b1SHuacai Chen case 2:
1050dc6d95b1SHuacai Chen *(u32 *)data = ((*(u32 *)data) & 0xff000000) |
1051dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] >> 8);
1052dc6d95b1SHuacai Chen break;
1053dc6d95b1SHuacai Chen case 3:
1054dc6d95b1SHuacai Chen *(u32 *)data = vcpu->arch.gprs[rt];
1055dc6d95b1SHuacai Chen break;
1056dc6d95b1SHuacai Chen default:
1057dc6d95b1SHuacai Chen break;
1058dc6d95b1SHuacai Chen }
1059dc6d95b1SHuacai Chen
1060dc6d95b1SHuacai Chen kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1061dc6d95b1SHuacai Chen vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1062dc6d95b1SHuacai Chen vcpu->arch.gprs[rt], *(u32 *)data);
1063dc6d95b1SHuacai Chen break;
1064dc6d95b1SHuacai Chen
1065dc6d95b1SHuacai Chen case swr_op:
1066dc6d95b1SHuacai Chen run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1067dc6d95b1SHuacai Chen vcpu->arch.host_cp0_badvaddr) & (~0x3);
1068dc6d95b1SHuacai Chen run->mmio.len = 4;
1069dc6d95b1SHuacai Chen imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1070dc6d95b1SHuacai Chen switch (imme) {
1071dc6d95b1SHuacai Chen case 0:
1072dc6d95b1SHuacai Chen *(u32 *)data = vcpu->arch.gprs[rt];
1073dc6d95b1SHuacai Chen break;
1074dc6d95b1SHuacai Chen case 1:
1075dc6d95b1SHuacai Chen *(u32 *)data = ((*(u32 *)data) & 0xff) |
1076dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 8);
1077dc6d95b1SHuacai Chen break;
1078dc6d95b1SHuacai Chen case 2:
1079dc6d95b1SHuacai Chen *(u32 *)data = ((*(u32 *)data) & 0xffff) |
1080dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 16);
1081dc6d95b1SHuacai Chen break;
1082dc6d95b1SHuacai Chen case 3:
1083dc6d95b1SHuacai Chen *(u32 *)data = ((*(u32 *)data) & 0xffffff) |
1084dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 24);
1085dc6d95b1SHuacai Chen break;
1086dc6d95b1SHuacai Chen default:
1087dc6d95b1SHuacai Chen break;
1088dc6d95b1SHuacai Chen }
1089dc6d95b1SHuacai Chen
1090dc6d95b1SHuacai Chen kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1091dc6d95b1SHuacai Chen vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1092dc6d95b1SHuacai Chen vcpu->arch.gprs[rt], *(u32 *)data);
1093dc6d95b1SHuacai Chen break;
1094dc6d95b1SHuacai Chen
109545c7e8afSThomas Bogendoerfer #if defined(CONFIG_64BIT)
1096dc6d95b1SHuacai Chen case sdl_op:
1097dc6d95b1SHuacai Chen run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1098dc6d95b1SHuacai Chen vcpu->arch.host_cp0_badvaddr) & (~0x7);
1099dc6d95b1SHuacai Chen
1100dc6d95b1SHuacai Chen run->mmio.len = 8;
1101dc6d95b1SHuacai Chen imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1102dc6d95b1SHuacai Chen switch (imme) {
1103dc6d95b1SHuacai Chen case 0:
1104dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
1105dc6d95b1SHuacai Chen ((vcpu->arch.gprs[rt] >> 56) & 0xff);
1106dc6d95b1SHuacai Chen break;
1107dc6d95b1SHuacai Chen case 1:
1108dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
1109dc6d95b1SHuacai Chen ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
1110dc6d95b1SHuacai Chen break;
1111dc6d95b1SHuacai Chen case 2:
1112dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
1113dc6d95b1SHuacai Chen ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
1114dc6d95b1SHuacai Chen break;
1115dc6d95b1SHuacai Chen case 3:
1116dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
1117dc6d95b1SHuacai Chen ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
1118dc6d95b1SHuacai Chen break;
1119dc6d95b1SHuacai Chen case 4:
1120dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
1121dc6d95b1SHuacai Chen ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
1122dc6d95b1SHuacai Chen break;
1123dc6d95b1SHuacai Chen case 5:
1124dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
1125dc6d95b1SHuacai Chen ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
1126dc6d95b1SHuacai Chen break;
1127dc6d95b1SHuacai Chen case 6:
1128dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
1129dc6d95b1SHuacai Chen ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
1130dc6d95b1SHuacai Chen break;
1131dc6d95b1SHuacai Chen case 7:
1132dc6d95b1SHuacai Chen *(u64 *)data = vcpu->arch.gprs[rt];
1133dc6d95b1SHuacai Chen break;
1134dc6d95b1SHuacai Chen default:
1135dc6d95b1SHuacai Chen break;
1136dc6d95b1SHuacai Chen }
1137dc6d95b1SHuacai Chen
1138dc6d95b1SHuacai Chen kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
1139dc6d95b1SHuacai Chen vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1140dc6d95b1SHuacai Chen vcpu->arch.gprs[rt], *(u64 *)data);
1141dc6d95b1SHuacai Chen break;
1142dc6d95b1SHuacai Chen
1143dc6d95b1SHuacai Chen case sdr_op:
1144dc6d95b1SHuacai Chen run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1145dc6d95b1SHuacai Chen vcpu->arch.host_cp0_badvaddr) & (~0x7);
1146dc6d95b1SHuacai Chen
1147dc6d95b1SHuacai Chen run->mmio.len = 8;
1148dc6d95b1SHuacai Chen imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1149dc6d95b1SHuacai Chen switch (imme) {
1150dc6d95b1SHuacai Chen case 0:
1151dc6d95b1SHuacai Chen *(u64 *)data = vcpu->arch.gprs[rt];
1152dc6d95b1SHuacai Chen break;
1153dc6d95b1SHuacai Chen case 1:
1154dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xff) |
1155dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 8);
1156dc6d95b1SHuacai Chen break;
1157dc6d95b1SHuacai Chen case 2:
1158dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffff) |
1159dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 16);
1160dc6d95b1SHuacai Chen break;
1161dc6d95b1SHuacai Chen case 3:
1162dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffff) |
1163dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 24);
1164dc6d95b1SHuacai Chen break;
1165dc6d95b1SHuacai Chen case 4:
1166dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
1167dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 32);
1168dc6d95b1SHuacai Chen break;
1169dc6d95b1SHuacai Chen case 5:
1170dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
1171dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 40);
1172dc6d95b1SHuacai Chen break;
1173dc6d95b1SHuacai Chen case 6:
1174dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
1175dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 48);
1176dc6d95b1SHuacai Chen break;
1177dc6d95b1SHuacai Chen case 7:
1178dc6d95b1SHuacai Chen *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
1179dc6d95b1SHuacai Chen (vcpu->arch.gprs[rt] << 56);
1180dc6d95b1SHuacai Chen break;
1181dc6d95b1SHuacai Chen default:
1182dc6d95b1SHuacai Chen break;
1183dc6d95b1SHuacai Chen }
1184dc6d95b1SHuacai Chen
1185dc6d95b1SHuacai Chen kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
1186dc6d95b1SHuacai Chen vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1187dc6d95b1SHuacai Chen vcpu->arch.gprs[rt], *(u64 *)data);
1188dc6d95b1SHuacai Chen break;
11893d9fdc25SHuacai Chen #endif
1190dc6d95b1SHuacai Chen
1191dc6d95b1SHuacai Chen #ifdef CONFIG_CPU_LOONGSON64
1192dc6d95b1SHuacai Chen case sdc2_op:
1193dc6d95b1SHuacai Chen rt = inst.loongson3_lsdc2_format.rt;
1194dc6d95b1SHuacai Chen switch (inst.loongson3_lsdc2_format.opcode1) {
1195dc6d95b1SHuacai Chen /*
1196dc6d95b1SHuacai Chen * Loongson-3 overridden sdc2 instructions.
1197dc6d95b1SHuacai Chen * opcode1 instruction
1198dc6d95b1SHuacai Chen * 0x0 gssbx: store 1 bytes from GPR
1199dc6d95b1SHuacai Chen * 0x1 gsshx: store 2 bytes from GPR
1200dc6d95b1SHuacai Chen * 0x2 gsswx: store 4 bytes from GPR
1201dc6d95b1SHuacai Chen * 0x3 gssdx: store 8 bytes from GPR
1202dc6d95b1SHuacai Chen */
1203dc6d95b1SHuacai Chen case 0x0:
1204dc6d95b1SHuacai Chen run->mmio.len = 1;
1205dc6d95b1SHuacai Chen *(u8 *)data = vcpu->arch.gprs[rt];
1206dc6d95b1SHuacai Chen
1207dc6d95b1SHuacai Chen kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1208dc6d95b1SHuacai Chen vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1209dc6d95b1SHuacai Chen vcpu->arch.gprs[rt], *(u8 *)data);
1210dc6d95b1SHuacai Chen break;
1211dc6d95b1SHuacai Chen case 0x1:
1212dc6d95b1SHuacai Chen run->mmio.len = 2;
1213dc6d95b1SHuacai Chen *(u16 *)data = vcpu->arch.gprs[rt];
1214dc6d95b1SHuacai Chen
1215dc6d95b1SHuacai Chen kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1216dc6d95b1SHuacai Chen vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1217dc6d95b1SHuacai Chen vcpu->arch.gprs[rt], *(u16 *)data);
1218dc6d95b1SHuacai Chen break;
1219dc6d95b1SHuacai Chen case 0x2:
1220dc6d95b1SHuacai Chen run->mmio.len = 4;
1221dc6d95b1SHuacai Chen *(u32 *)data = vcpu->arch.gprs[rt];
1222dc6d95b1SHuacai Chen
1223dc6d95b1SHuacai Chen kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1224dc6d95b1SHuacai Chen vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1225dc6d95b1SHuacai Chen vcpu->arch.gprs[rt], *(u32 *)data);
1226dc6d95b1SHuacai Chen break;
1227dc6d95b1SHuacai Chen case 0x3:
1228dc6d95b1SHuacai Chen run->mmio.len = 8;
1229dc6d95b1SHuacai Chen *(u64 *)data = vcpu->arch.gprs[rt];
1230dc6d95b1SHuacai Chen
1231dc6d95b1SHuacai Chen kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
1232dc6d95b1SHuacai Chen vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1233dc6d95b1SHuacai Chen vcpu->arch.gprs[rt], *(u64 *)data);
1234dc6d95b1SHuacai Chen break;
1235dc6d95b1SHuacai Chen default:
12360ed076c7SColin Ian King kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
1237dc6d95b1SHuacai Chen inst.word);
1238dc6d95b1SHuacai Chen break;
1239dc6d95b1SHuacai Chen }
1240dc6d95b1SHuacai Chen break;
1241dc6d95b1SHuacai Chen #endif
1242d7d5b05fSDeng-Cheng Zhu default:
1243d86c1ebeSJames Hogan kvm_err("Store not yet supported (inst=0x%08x)\n",
1244258f3a2eSJames Hogan inst.word);
12458b48d5b7SJames Hogan goto out_fail;
1246d7d5b05fSDeng-Cheng Zhu }
1247d7d5b05fSDeng-Cheng Zhu
12488b48d5b7SJames Hogan vcpu->mmio_needed = 1;
1249f21db309SHuacai Chen run->mmio.is_write = 1;
12508b48d5b7SJames Hogan vcpu->mmio_is_write = 1;
1251f21db309SHuacai Chen
1252f21db309SHuacai Chen r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
1253f21db309SHuacai Chen run->mmio.phys_addr, run->mmio.len, data);
1254f21db309SHuacai Chen
1255f21db309SHuacai Chen if (!r) {
1256f21db309SHuacai Chen vcpu->mmio_needed = 0;
1257f21db309SHuacai Chen return EMULATE_DONE;
1258f21db309SHuacai Chen }
1259f21db309SHuacai Chen
12608b48d5b7SJames Hogan return EMULATE_DO_MMIO;
1261d7d5b05fSDeng-Cheng Zhu
12628b48d5b7SJames Hogan out_fail:
12638b48d5b7SJames Hogan /* Rollback PC if emulation was unsuccessful */
12648b48d5b7SJames Hogan vcpu->arch.pc = curr_pc;
12658b48d5b7SJames Hogan return EMULATE_FAIL;
1266d7d5b05fSDeng-Cheng Zhu }
1267d7d5b05fSDeng-Cheng Zhu
/*
 * Emulate a guest load instruction that faulted to an MMIO address.
 *
 * Decodes @inst and records everything kvm_mips_complete_mmio_load() needs
 * to finish the access later: the destination GPR (arch.io_gpr), the PC to
 * resume at (arch.io_pc), the access width (run->mmio.len) and a code in
 * vcpu->mmio_needed describing how the data must be written back into the
 * register (zero/sign extension, or a partial-register merge for the
 * unaligned lwl/lwr/ldl/ldr instructions).  The read is first attempted on
 * the in-kernel MMIO bus; if no device claims the address, EMULATE_DO_MMIO
 * is returned so userspace performs the access and the kernel completes it
 * via kvm_mips_complete_mmio_load().
 */
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;
	enum emulation_result er;
	unsigned long curr_pc;
	u32 op, rt;
	unsigned int imme;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	/*
	 * Find the resume PC now while we have safe and easy access to the
	 * prior branch instruction, and save it for
	 * kvm_mips_complete_mmio_load() to restore later.
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	vcpu->arch.io_pc = vcpu->arch.pc;
	/* Keep the pre-load PC until the MMIO access actually completes */
	vcpu->arch.pc = curr_pc;

	/* Destination register, consumed by kvm_mips_complete_mmio_load() */
	vcpu->arch.io_gpr = rt;

	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		return EMULATE_FAIL;

	/*
	 * mmio_needed encoding (interpreted by kvm_mips_complete_mmio_load):
	 *   1       = zero-extend (lbu/lhu/lwu)
	 *   2       = sign-extend (lb/lh/lw/ld) -- the default below
	 *   3..10   = lwl/lwr partial-word merge, by address offset
	 *   11..26  = ldl/ldr partial-doubleword merge, by address offset
	 *   27..30  = Loongson gsl{b,h,w,d}x (sign-extend)
	 */
	vcpu->mmio_needed = 2;	/* signed */
	switch (op) {
#if defined(CONFIG_64BIT)
	case ld_op:
		run->mmio.len = 8;
		break;

	case lwu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
#endif
	case lw_op:
		run->mmio.len = 4;
		break;

	case lhu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
	case lh_op:
		run->mmio.len = 2;
		break;

	case lbu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
	case lb_op:
		run->mmio.len = 1;
		break;

	case lwl_op:
		/*
		 * Unaligned load-word-left: read the whole aligned word
		 * containing badvaddr; the offset selects how many high
		 * bytes of the GPR get replaced on completion.
		 */
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);

		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 3;	/* 1 byte */
			break;
		case 1:
			vcpu->mmio_needed = 4;	/* 2 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 5;	/* 3 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 6;	/* 4 bytes */
			break;
		default:
			break;
		}
		break;

	case lwr_op:
		/*
		 * Unaligned load-word-right: aligned word read, offset
		 * selects how many low bytes of the GPR get replaced.
		 */
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);

		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 7;	/* 4 bytes */
			break;
		case 1:
			vcpu->mmio_needed = 8;	/* 3 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 9;	/* 2 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 10;	/* 1 byte */
			break;
		default:
			break;
		}
		break;

#if defined(CONFIG_64BIT)
	case ldl_op:
		/* Unaligned load-doubleword-left: aligned 8-byte read */
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 11;	/* 1 byte */
			break;
		case 1:
			vcpu->mmio_needed = 12;	/* 2 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 13;	/* 3 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 14;	/* 4 bytes */
			break;
		case 4:
			vcpu->mmio_needed = 15;	/* 5 bytes */
			break;
		case 5:
			vcpu->mmio_needed = 16;	/* 6 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 17;	/* 7 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 18;	/* 8 bytes */
			break;
		default:
			break;
		}
		break;

	case ldr_op:
		/* Unaligned load-doubleword-right: aligned 8-byte read */
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 19;	/* 8 bytes */
			break;
		case 1:
			vcpu->mmio_needed = 20;	/* 7 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 21;	/* 6 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 22;	/* 5 bytes */
			break;
		case 4:
			vcpu->mmio_needed = 23;	/* 4 bytes */
			break;
		case 5:
			vcpu->mmio_needed = 24;	/* 3 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 25;	/* 2 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 26;	/* 1 byte */
			break;
		default:
			break;
		}
		break;
#endif

#ifdef CONFIG_CPU_LOONGSON64
	case ldc2_op:
		rt = inst.loongson3_lsdc2_format.rt;
		switch (inst.loongson3_lsdc2_format.opcode1) {
		/*
		 * Loongson-3 overridden ldc2 instructions.
		 * opcode1		instruction
		 *   0x0		gslbx: load 1 byte into GPR
		 *   0x1		gslhx: load 2 bytes into GPR
		 *   0x2		gslwx: load 4 bytes into GPR
		 *   0x3		gsldx: load 8 bytes into GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			vcpu->mmio_needed = 27;	/* signed */
			break;
		case 0x1:
			run->mmio.len = 2;
			vcpu->mmio_needed = 28;	/* signed */
			break;
		case 0x2:
			run->mmio.len = 4;
			vcpu->mmio_needed = 29;	/* signed */
			break;
		case 0x3:
			run->mmio.len = 8;
			vcpu->mmio_needed = 30;	/* signed */
			break;
		default:
			kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		vcpu->mmio_needed = 0;
		return EMULATE_FAIL;
	}

	run->mmio.is_write = 0;
	vcpu->mmio_is_write = 0;

	/* Try the in-kernel MMIO bus first */
	r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
			run->mmio.phys_addr, run->mmio.len, run->mmio.data);

	if (!r) {
		/* Handled in kernel: write back the GPR and resume */
		kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	/* Punt to userspace; completion happens on the next vcpu entry */
	return EMULATE_DO_MMIO;
}
1508d7d5b05fSDeng-Cheng Zhu
/*
 * Complete a pending MMIO load: restore the resume PC saved by
 * kvm_mips_emulate_load() and write run->mmio.data into the destination
 * GPR according to vcpu->mmio_needed, which encodes the extension or
 * partial-register merge mode:
 *   1      = zero-extend; 2 = sign-extend
 *   3..6   = lwl merge (keep low bytes, replace 1..4 high bytes)
 *   7..10  = lwr merge (keep high bytes, replace 4..1 low bytes)
 *   11..18 = ldl merge; 19..26 = ldr merge (64-bit equivalents)
 */
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	/* A load can never be wider than a GPR */
	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/* Restore saved resume PC */
	vcpu->arch.pc = vcpu->arch.io_pc;

	switch (run->mmio.len) {
	case 8:
		switch (vcpu->mmio_needed) {
		/* 11..18: ldl -- replace N high bytes, keep the rest */
		case 11:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xff) << 56);
			break;
		case 12:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffff) << 48);
			break;
		case 13:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffff) << 40);
			break;
		case 14:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
			break;
		case 15:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
			break;
		case 16:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
			break;
		case 17:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
			break;
		/* 18 (ldl, all 8 bytes) and 19 (ldr, all 8 bytes): full word */
		case 18:
		case 19:
			*gpr = *(s64 *)run->mmio.data;
			break;
		/* 20..26: ldr -- replace N low bytes, keep the rest */
		case 20:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
				((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
			break;
		case 21:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
				((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
			break;
		case 22:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
				((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
			break;
		case 23:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
				((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
			break;
		case 24:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
				((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
			break;
		case 25:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
				((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
			break;
		case 26:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
				((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
			break;
		default:
			/* Plain 64-bit load (ld / gsldx) */
			*gpr = *(s64 *)run->mmio.data;
		}
		break;

	case 4:
		switch (vcpu->mmio_needed) {
		case 1:
			/* Zero-extend (lwu) */
			*gpr = *(u32 *)run->mmio.data;
			break;
		case 2:
			/* Sign-extend (lw) */
			*gpr = *(s32 *)run->mmio.data;
			break;
		/* 3..5: lwl -- replace N high bytes of the 32-bit value */
		case 3:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
				(((*(s32 *)run->mmio.data) & 0xff) << 24);
			break;
		case 4:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
				(((*(s32 *)run->mmio.data) & 0xffff) << 16);
			break;
		case 5:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
				(((*(s32 *)run->mmio.data) & 0xffffff) << 8);
			break;
		/* 6 (lwl, all 4 bytes) and 7 (lwr, all 4 bytes): full word */
		case 6:
		case 7:
			*gpr = *(s32 *)run->mmio.data;
			break;
		/* 8..10: lwr -- replace N low bytes of the 32-bit value */
		case 8:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
				((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
			break;
		case 9:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
				((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
			break;
		case 10:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
				((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
			break;
		default:
			/* Any other code (e.g. gslwx): sign-extend */
			*gpr = *(s32 *)run->mmio.data;
		}
		break;

	case 2:
		/* mmio_needed == 1 means zero-extend, otherwise sign-extend */
		if (vcpu->mmio_needed == 1)
			*gpr = *(u16 *)run->mmio.data;
		else
			*gpr = *(s16 *)run->mmio.data;

		break;
	case 1:
		/* mmio_needed == 1 means zero-extend, otherwise sign-extend */
		if (vcpu->mmio_needed == 1)
			*gpr = *(u8 *)run->mmio.data;
		else
			*gpr = *(s8 *)run->mmio.data;
		break;
	}

done:
	return er;
}
1651