/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

/* XXX to do (lhax, lha and lhau are handled below):
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lmw
 * stmw
 *
 */
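
/*
 * kvmppc_emulate_loadstore() - emulate a guest load/store instruction.
 *
 * Fetches and decodes the last guest instruction, forwards recognised
 * loads and stores to the MMIO handlers (kvmppc_handle_load/store), and
 * advances the guest PC past the instruction unless emulation fails, in
 * which case a program interrupt is queued for the guest instead.
 */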
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
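	/* Set to 0 below if the guest PC must not be advanced. */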
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

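	/*
	 * Dispatch on the primary opcode; opcode 31 groups the X-form
	 * loads and stores, which are further decoded by their extended
	 * opcode below.
	 */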
	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

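		/*
		 * "Update" forms also write the effective address of the
		 * access (saved in vcpu->arch.vaddr_accessed) back into rA.
		 */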
		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

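		/* kvmppc_handle_loads() sign-extends the loaded value. */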
		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbst/dcbf/dcbi
			 * because hardware DMA is not snooped by the dcache,
			 * but emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

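		/*
		 * The byte-reversed forms pass 0 as the final argument so
		 * the data is swapped relative to the guest's default
		 * endianness.
		 */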
		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

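	/*
	 * The remaining loads and stores are dispatched directly on the
	 * primary opcode.
	 */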
	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64-bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	/* TBD: Add support for other 64-bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

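	/* lha/lhau sign-extend the halfword, hence kvmppc_handle_loads(). */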
	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
		break;
	}

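	/*
	 * If the instruction could not be emulated, queue a program
	 * interrupt for the guest and leave the PC pointing at it.
	 */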
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}