/*
 * x86 condition code helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"

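/*
 * Instantiate the size-specific compute_all_* and compute_c_* helpers
 * from the template: SHIFT selects the operand width (0 = 8-bit,
 * 1 = 16-bit, 2 = 32-bit, 3 = 64-bit).
 */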
#define SHIFT 0
#include "cc_helper_template.h.inc"
#undef SHIFT

#define SHIFT 1
#include "cc_helper_template.h.inc"
#undef SHIFT

#define SHIFT 2
#include "cc_helper_template.h.inc"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "cc_helper_template.h.inc"
#undef SHIFT

#endif

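/*
 * ADCX: src1 holds the previous EFLAGS, dst holds the new carry bit
 * (0 or 1); only CF is replaced, all other flags pass through.
 */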
static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_C) | (dst * CC_C);
}

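/*
 * ADOX: src2 holds the new overflow bit (0 or 1); only OF is replaced.
 */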
static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_O) | (src2 * CC_O);
}

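/*
 * ADCX followed by ADOX: both CF (from dst) and OF (from src2) are
 * replaced in the previous flags held in src1.
 */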
static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
}

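/*
 * Return a value that is zero if and only if ZF would be set: either
 * taken from the stored EFLAGS in src1, or from the destination of the
 * last operation masked to its operand size.
 */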
target_ulong helper_cc_compute_nz(target_ulong dst, target_ulong src1,
                                  int op)
{
    if (CC_OP_HAS_EFLAGS(op)) {
        return ~src1 & CC_Z;
    } else {
        MemOp size = cc_op_size(op);
        target_ulong mask = MAKE_64BIT_MASK(0, 8 << size);

        return dst & mask;
    }
}

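/*
 * Recompute the full set of arithmetic flags (CF, PF, AF, ZF, SF, OF)
 * from the lazily stored operands, according to the CC_OP encoding of
 * the last flag-setting operation.
 */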
target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
                                   target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return src1;
    case CC_OP_POPCNT:
        return dst ? 0 : CC_Z;

    case CC_OP_MULB:
        return compute_all_mulb(dst, src1);
    case CC_OP_MULW:
        return compute_all_mulw(dst, src1);
    case CC_OP_MULL:
        return compute_all_mull(dst, src1);

    case CC_OP_ADDB:
        return compute_all_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_all_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_all_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_all_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_all_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_all_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_all_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_all_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_all_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_all_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_all_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_all_sbbl(dst, src1, src2);

    case CC_OP_LOGICB:
        return compute_all_logicb(dst, src1);
    case CC_OP_LOGICW:
        return compute_all_logicw(dst, src1);
    case CC_OP_LOGICL:
        return compute_all_logicl(dst, src1);

    case CC_OP_INCB:
        return compute_all_incb(dst, src1);
    case CC_OP_INCW:
        return compute_all_incw(dst, src1);
    case CC_OP_INCL:
        return compute_all_incl(dst, src1);

    case CC_OP_DECB:
        return compute_all_decb(dst, src1);
    case CC_OP_DECW:
        return compute_all_decw(dst, src1);
    case CC_OP_DECL:
        return compute_all_decl(dst, src1);

    case CC_OP_SHLB:
        return compute_all_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_all_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_all_shll(dst, src1);

    case CC_OP_SARB:
        return compute_all_sarb(dst, src1);
    case CC_OP_SARW:
        return compute_all_sarw(dst, src1);
    case CC_OP_SARL:
        return compute_all_sarl(dst, src1);

    case CC_OP_BMILGB:
        return compute_all_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_all_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_all_bmilgl(dst, src1);

    case CC_OP_BLSIB:
        return compute_all_blsib(dst, src1);
    case CC_OP_BLSIW:
        return compute_all_blsiw(dst, src1);
    case CC_OP_BLSIL:
        return compute_all_blsil(dst, src1);

    case CC_OP_ADCX:
        return compute_all_adcx(dst, src1, src2);
    case CC_OP_ADOX:
        return compute_all_adox(dst, src1, src2);
    case CC_OP_ADCOX:
        return compute_all_adcox(dst, src1, src2);

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq(dst, src1);
    case CC_OP_ADDQ:
        return compute_all_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_all_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_all_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_all_sbbq(dst, src1, src2);
    case CC_OP_LOGICQ:
        return compute_all_logicq(dst, src1);
    case CC_OP_INCQ:
        return compute_all_incq(dst, src1);
    case CC_OP_DECQ:
        return compute_all_decq(dst, src1);
    case CC_OP_SHLQ:
        return compute_all_shlq(dst, src1);
    case CC_OP_SARQ:
        return compute_all_sarq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_all_bmilgq(dst, src1);
    case CC_OP_BLSIQ:
        return compute_all_blsiq(dst, src1);
#endif
    }
}

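/* Convenience wrapper that reads the lazy flag state from the CPU. */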
uint32_t cpu_cc_compute_all(CPUX86State *env)
{
    return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, CC_OP);
}

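/*
 * Compute only the carry flag; for many CC_OP encodings this is much
 * cheaper than recomputing the full flag set.
 */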
target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
                                 target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:
    case CC_OP_POPCNT:
        return 0;

    case CC_OP_EFLAGS:
    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
    case CC_OP_ADOX:
        return src1 & 1;

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:
    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:
        return src1;

    case CC_OP_MULB:
    case CC_OP_MULW:
    case CC_OP_MULL:
    case CC_OP_MULQ:
        return src1 != 0;

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return dst;

    case CC_OP_ADDB:
        return compute_c_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_c_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_c_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_c_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_c_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_c_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_c_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_c_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_c_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_c_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_c_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_c_sbbl(dst, src1, src2);

    case CC_OP_SHLB:
        return compute_c_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_c_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_c_shll(dst, src1);

    case CC_OP_BMILGB:
        return compute_c_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_c_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_c_bmilgl(dst, src1);

    case CC_OP_BLSIB:
        return compute_c_blsib(dst, src1);
    case CC_OP_BLSIW:
        return compute_c_blsiw(dst, src1);
    case CC_OP_BLSIL:
        return compute_c_blsil(dst, src1);

#ifdef TARGET_X86_64
    case CC_OP_ADDQ:
        return compute_c_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_c_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_c_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_c_sbbq(dst, src1, src2);
    case CC_OP_SHLQ:
        return compute_c_shlq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_c_bmilgq(dst, src1);
    case CC_OP_BLSIQ:
        return compute_c_blsiq(dst, src1);
#endif
    }
}

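/* Load a new EFLAGS value, changing only the bits selected by update_mask. */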
void helper_write_eflags(CPUX86State *env, target_ulong t0,
                         uint32_t update_mask)
{
    cpu_load_eflags(env, t0, update_mask);
}

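/*
 * Assemble the guest-visible EFLAGS value from the lazily computed
 * arithmetic flags, the direction flag, and the remaining stored bits;
 * VM and RF are masked out.
 */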
target_ulong helper_read_eflags(CPUX86State *env)
{
    uint32_t eflags;

    eflags = cpu_cc_compute_all(env);
    eflags |= (env->df & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

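/* CLTS: clear the task-switched bit in CR0 and in the cached hflags. */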
void helper_clts(CPUX86State *env)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}