1 /*
2 * Helpers for emulation of CP0-related MIPS instructions.
3 *
4 * Copyright (C) 2004-2005 Jocelyn Mayer
5 * Copyright (C) 2020 Wave Computing, Inc.
6 * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 *
21 */
22
23 #include "qemu/osdep.h"
24 #include "qemu/log.h"
25 #include "qemu/main-loop.h"
26 #include "cpu.h"
27 #include "internal.h"
28 #include "qemu/host-utils.h"
29 #include "exec/helper-proto.h"
30 #include "exec/cputlb.h"
31
32
33 /* SMP helpers. */
/*
 * Return whether VPE @c is in a wait-for-interrupt state:
 * halted at the CPU level while still administratively active.
 */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    /*
     * If the VPE is halted but otherwise active, it means it's waiting for
     * an interrupt.
     */
    return cpu->halted && mips_vpe_active(env);
}
45
mips_vp_is_wfi(MIPSCPU * c)46 static bool mips_vp_is_wfi(MIPSCPU *c)
47 {
48 CPUState *cpu = CPU(c);
49 CPUMIPSState *env = &c->env;
50
51 return cpu->halted && mips_vp_active(env);
52 }
53
/* Request a wake-up of VPE @c by raising CPU_INTERRUPT_WAKE. */
static inline void mips_vpe_wake(MIPSCPU *c)
{
    /*
     * Don't set ->halted = 0 directly, let it be done via cpu_has_work
     * because there might be other conditions that state that c should
     * be sleeping.
     */
    bql_lock();  /* cpu_interrupt() is called under the BQL here */
    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
    bql_unlock();
}
65
/* Put VPE @cpu to sleep: halt it and cancel any pending wake request. */
static inline void mips_vpe_sleep(MIPSCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /*
     * The VPE was shut off, really go to bed.
     * Reset any old _WAKE requests.
     */
    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
}
77
/*
 * Wake the VPE owning TC @tc of @cpu if that VPE is active and not
 * already waiting for an interrupt.  @tc is currently unused because
 * no real per-TC scheduling is implemented (see FIXME below).
 */
static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule. */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
        mips_vpe_wake(cpu);
    }
}
87
/*
 * Put the VPE owning TC @tc of @cpu to sleep if the VPE as a whole is
 * no longer active.  @tc is unused for the same reason as in
 * mips_tc_wake() (no per-TC scheduling yet).
 */
static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule. */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(cpu);
    }
}
97
98 /**
99 * mips_cpu_map_tc:
100 * @env: CPU from which mapping is performed.
101 * @tc: Should point to an int with the value of the global TC index.
102 *
103 * This function will transform @tc into a local index within the
104 * returned #CPUMIPSState.
105 */
106
107 /*
108 * FIXME: This code assumes that all VPEs have the same number of TCs,
109 * which depends on runtime setup. Can probably be fixed by
110 * walking the list of CPUMIPSStates.
111 */
static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
{
    MIPSCPU *cpu;
    CPUState *cs;
    CPUState *other_cs;
    int vpe_idx;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs. */
        *tc = env->current_tc;
        return env;
    }

    cs = env_cpu(env);
    /* Assumes every VPE has cs->nr_threads TCs (see FIXME above). */
    vpe_idx = tc_idx / cs->nr_threads;
    *tc = tc_idx % cs->nr_threads;
    other_cs = qemu_get_cpu(vpe_idx);
    if (other_cs == NULL) {
        /* Target VPE does not exist: fall back to the current one. */
        return env;
    }
    cpu = MIPS_CPU(other_cs);
    return &cpu->env;
}
136
137 /*
138 * The per VPE CP0_Status register shares some fields with the per TC
139 * CP0_TCStatus registers. These fields are wired to the same registers,
140 * so changes to either of them should be reflected on both registers.
141 *
 * Also, EntryHi shares the bottom 8 bit ASID with TCStatus.
143 *
 * These helpers synchronize the registers for a given cpu.
145 */
146
147 /*
148 * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
149 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
150 * int tc);
151 */
152
153 /* Called for updates to CP0_TCStatus. */
/*
 * Propagate a TCStatus write @v for TC @tc into the shared per-VPE
 * registers: the TCU/TMX/TKSU fields are mirrored into CP0_Status and
 * the TASID field into CP0_EntryHi, then hflags are recomputed.
 */
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
                             target_ulong v)
{
    uint32_t status;
    uint32_t tcu, tmx, tasid, tksu;
    /* Status fields that are wired to TCStatus fields. */
    uint32_t mask = ((1U << CP0St_CU3)
                     | (1 << CP0St_CU2)
                     | (1 << CP0St_CU1)
                     | (1 << CP0St_CU0)
                     | (1 << CP0St_MX)
                     | (3 << CP0St_KSU));

    /* Extract the shared fields from the TCStatus value. */
    tcu = (v >> CP0TCSt_TCU0) & 0xf;
    tmx = (v >> CP0TCSt_TMX) & 0x1;
    tasid = v & cpu->CP0_EntryHi_ASID_mask;
    tksu = (v >> CP0TCSt_TKSU) & 0x3;

    /* Re-assemble them at their CP0_Status positions. */
    status = tcu << CP0St_CU0;
    status |= tmx << CP0St_MX;
    status |= tksu << CP0St_KSU;

    cpu->CP0_Status &= ~mask;
    cpu->CP0_Status |= status;

    /* Sync the TASID with EntryHi. */
    cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
    cpu->CP0_EntryHi |= tasid;

    compute_hflags(cpu);
}
184
185 /* Called for updates to CP0_EntryHi. */
/*
 * Called for updates to CP0_EntryHi: mirror EntryHi's ASID field into
 * the TASID field of TC @tc's TCStatus register.
 */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
{
    uint32_t asid = cpu->CP0_EntryHi & cpu->CP0_EntryHi_ASID_mask;
    int32_t *tcst = (tc == cpu->current_tc)
                    ? &cpu->active_tc.CP0_TCStatus
                    : &cpu->tcs[tc].CP0_TCStatus;

    *tcst = (*tcst & ~cpu->CP0_EntryHi_ASID_mask) | asid;
}
202
203 /* XXX: do not use a global */
/* XXX: do not use a global */
/*
 * Return a pseudo-random TLB index in [CP0_Wired, nb_tlb), for the CP0
 * Random register.  Consecutive calls never return the same index.
 * NOTE(review): the static seed/prev_idx are shared by all CPUs.
 */
uint32_t cpu_mips_get_random(CPUMIPSState *env)
{
    static uint32_t seed = 1;
    static uint32_t prev_idx;
    uint32_t idx;
    /* Number of TLB entries eligible for random replacement. */
    uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;

    if (nb_rand_tlb == 1) {
        /* Only one candidate: no choice to make. */
        return env->tlb->nb_tlb - 1;
    }

    /* Don't return same value twice, so get another value */
    do {
        /*
         * Use a simple algorithm of Linear Congruential Generator
         * from ISO/IEC 9899 standard.
         */
        seed = 1103515245 * seed + 12345;
        idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
    } while (idx == prev_idx);
    prev_idx = idx;
    return idx;
}
227
228 /* CP0 helpers */
/* MFC0 MVPControl: the register lives in the shared per-processor MVP state. */
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPControl;
}
233
/* MFC0 MVPConf0: read the shared MVP configuration register 0. */
target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf0;
}
238
/* MFC0 MVPConf1: read the shared MVP configuration register 1. */
target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf1;
}
243
/* MFC0 Random: a pseudo-random TLB index, sign-extended to target_ulong. */
target_ulong helper_mfc0_random(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_random(env);
}
248
/* MFC0 TCStatus: read the current TC's TCStatus register. */
target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCStatus;
}
253
helper_mftc0_tcstatus(CPUMIPSState * env)254 target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
255 {
256 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
257 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
258
259 if (other_tc == other->current_tc) {
260 return other->active_tc.CP0_TCStatus;
261 } else {
262 return other->tcs[other_tc].CP0_TCStatus;
263 }
264 }
265
/* MFC0 TCBind: read the current TC's binding register. */
target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCBind;
}
270
helper_mftc0_tcbind(CPUMIPSState * env)271 target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
272 {
273 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
274 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
275
276 if (other_tc == other->current_tc) {
277 return other->active_tc.CP0_TCBind;
278 } else {
279 return other->tcs[other_tc].CP0_TCBind;
280 }
281 }
282
/* MFC0 TCRestart: the restart address of the current TC is its PC. */
target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}
287
helper_mftc0_tcrestart(CPUMIPSState * env)288 target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
289 {
290 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
291 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
292
293 if (other_tc == other->current_tc) {
294 return other->active_tc.PC;
295 } else {
296 return other->tcs[other_tc].PC;
297 }
298 }
299
/* MFC0 TCHalt: read the current TC's halt register. */
target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}
304
helper_mftc0_tchalt(CPUMIPSState * env)305 target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
306 {
307 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
308 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
309
310 if (other_tc == other->current_tc) {
311 return other->active_tc.CP0_TCHalt;
312 } else {
313 return other->tcs[other_tc].CP0_TCHalt;
314 }
315 }
316
/* MFC0 TCContext: read the current TC's context register. */
target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}
321
helper_mftc0_tccontext(CPUMIPSState * env)322 target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
323 {
324 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
325 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
326
327 if (other_tc == other->current_tc) {
328 return other->active_tc.CP0_TCContext;
329 } else {
330 return other->tcs[other_tc].CP0_TCContext;
331 }
332 }
333
/* MFC0 TCSchedule: read the current TC's scheduling register. */
target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}
338
helper_mftc0_tcschedule(CPUMIPSState * env)339 target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
340 {
341 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
342 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
343
344 if (other_tc == other->current_tc) {
345 return other->active_tc.CP0_TCSchedule;
346 } else {
347 return other->tcs[other_tc].CP0_TCSchedule;
348 }
349 }
350
/* MFC0 TCScheFBack: read the current TC's schedule-feedback register. */
target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}
355
helper_mftc0_tcschefback(CPUMIPSState * env)356 target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
357 {
358 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
359 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
360
361 if (other_tc == other->current_tc) {
362 return other->active_tc.CP0_TCScheFBack;
363 } else {
364 return other->tcs[other_tc].CP0_TCScheFBack;
365 }
366 }
367
/* MFC0 Count: current CP0 Count timer value, sign-extended. */
target_ulong helper_mfc0_count(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_count(env);
}
372
helper_mftc0_entryhi(CPUMIPSState * env)373 target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
374 {
375 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
376 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
377
378 return other->CP0_EntryHi;
379 }
380
helper_mftc0_cause(CPUMIPSState * env)381 target_ulong helper_mftc0_cause(CPUMIPSState *env)
382 {
383 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
384 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
385
386 return other->CP0_Cause;
387 }
388
helper_mftc0_status(CPUMIPSState * env)389 target_ulong helper_mftc0_status(CPUMIPSState *env)
390 {
391 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
392 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
393
394 return other->CP0_Status;
395 }
396
/* MFC0 LLAddr: the LL physical address, right-shifted per CPU config. */
target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
    return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
}
401
/* MFC0 MAAR: low 32 bits of the MAAR entry selected by MAARI. */
target_ulong helper_mfc0_maar(CPUMIPSState *env)
{
    return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
}
406
/* MFHC0 MAAR: high 32 bits of the MAAR entry selected by MAARI. */
target_ulong helper_mfhc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI] >> 32;
}
411
/* MFC0 WatchLo[@sel]: low half of the selected watchpoint register. */
target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}
416
/* MFC0 WatchHi[@sel]: low 32 bits of the selected WatchHi register. */
target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t) env->CP0_WatchHi[sel];
}
421
/* MFHC0 WatchHi[@sel]: high 32 bits of the selected WatchHi register. */
target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel] >> 32;
}
426
helper_mfc0_debug(CPUMIPSState * env)427 target_ulong helper_mfc0_debug(CPUMIPSState *env)
428 {
429 target_ulong t0 = env->CP0_Debug;
430 if (env->hflags & MIPS_HFLAG_DM) {
431 t0 |= 1 << CP0DB_DM;
432 }
433
434 return t0;
435 }
436
helper_mftc0_debug(CPUMIPSState * env)437 target_ulong helper_mftc0_debug(CPUMIPSState *env)
438 {
439 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
440 int32_t tcstatus;
441 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
442
443 if (other_tc == other->current_tc) {
444 tcstatus = other->active_tc.CP0_Debug_tcstatus;
445 } else {
446 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
447 }
448
449 /* XXX: Might be wrong, check with EJTAG spec. */
450 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
451 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
452 }
453
454 #if defined(TARGET_MIPS64)
/* DMFC0 TCRestart (64-bit): the restart address is the current TC's PC. */
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}
459
/* DMFC0 TCHalt (64-bit): read the current TC's halt register. */
target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}
464
/* DMFC0 TCContext (64-bit): read the current TC's context register. */
target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}
469
/* DMFC0 TCSchedule (64-bit): read the current TC's scheduling register. */
target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}
474
/* DMFC0 TCScheFBack (64-bit): read the current TC's schedule feedback. */
target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}
479
/* DMFC0 LLAddr (64-bit): shifted LL address, no sign-extension needed. */
target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
    return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
}
484
/* DMFC0 MAAR (64-bit): full MAAR entry selected by MAARI. */
target_ulong helper_dmfc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI];
}
489
/* DMFC0 WatchLo[@sel] (64-bit): full selected watchpoint low register. */
target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
494
/* DMFC0 WatchHi[@sel] (64-bit): full selected watchpoint high register. */
target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}
499
500 #endif /* TARGET_MIPS64 */
501
/*
 * MTC0 Index: accept only indices that address an existing TLB entry.
 * The P bit (bit 31) is preserved; on R6 a written P bit is OR-ed in
 * as well.
 */
void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t p_bit = env->CP0_Index & 0x80000000;
    uint32_t idx = arg1 & 0x7fffffff;

    if (idx >= env->tlb->nb_tlb) {
        /* Out-of-range writes leave the register unchanged. */
        return;
    }
    if (env->insn_flags & ISA_MIPS_R6) {
        p_bit |= arg1 & 0x80000000;
    }
    env->CP0_Index = p_bit | idx;
}
513
/*
 * MTC0 MVPControl: only fields the current VPE is allowed to change
 * are written.  CPA/VPC/EVP require master-VP privilege (VPEConf0.MVP);
 * STLB is additionally gated on the processor being in config state.
 */
void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    }
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0MVPCo_STLB);
    }
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable shared TLB, enable/disable VPEs. */

    env->mvp->CP0_MVPControl = newval;
}
532
/*
 * MTC0 VPEControl: YSI/GSI/TE/TargTC are the writable fields; all
 * other bits are preserved.
 */
void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /*
     * Yield scheduler intercept not implemented.
     * Gating storage scheduler intercept not implemented.
     */

    /* TODO: Enable/disable TCs. */

    env->CP0_VPEControl = newval;
}
551
/*
 * MTTC0 VPEControl: same writable-field mask as helper_mtc0_vpecontrol(),
 * but applied to the VPE owning the TC selected by VPEControl.TargTC.
 */
void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable TCs. */

    other->CP0_VPEControl = newval;
}
567
/* MFTC0 VPEControl: read VPEControl of the targeted TC's VPE. */
target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    /* FIXME: Mask away return zero on read bits. */
    return other->CP0_VPEControl;
}
575
helper_mftc0_vpeconf0(CPUMIPSState * env)576 target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
577 {
578 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
579 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
580
581 return other->CP0_VPEConf0;
582 }
583
/*
 * MTC0 VPEConf0: MVP/VPA writable only with master-VP privilege; the
 * XTC field additionally requires the VPE to be activated (VPA set).
 */
void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
            mask |= (0xff << CP0VPEC0_XTC);
        }
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL. */

    env->CP0_VPEConf0 = newval;
}
601
/*
 * MTTC0 VPEConf0: only MVP and VPA are writable on the targeted VPE
 * (XTC is not writable through the cross-TC path here).
 */
void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask = 0;
    uint32_t newval;

    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL. */
    other->CP0_VPEConf0 = newval;
}
615
/*
 * MTC0 VPEConf1: the NCX/NCP2/NCP1 fields are only writable while the
 * processor is in configuration state (MVPControl.VPC set).
 */
void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    /*
     * Braces added: the if body spans multiple physical lines; the
     * unbraced form was inconsistent with the rest of this file and
     * error-prone under future edits.
     */
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    }
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    /* TODO: Handle FPU (CP1) binding. */

    env->CP0_VPEConf1 = newval;
}
633
/* MTC0 YQMask: @arg1 is deliberately ignored (see comment below). */
void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}
639
/* MTC0 VPEOpt: only the low 16 bits are writable. */
void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
644
645 #define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
646
/*
 * MTC0 EntryLo0: keep the PFN/flag bits allowed by PAMask and relocate
 * the RI/XI bits (accepted only when enabled via PageGrain.XIE).
 */
void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}
654
655 #if defined(TARGET_MIPS64)
656 #define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
657
/*
 * DMTC0 EntryLo0 (64-bit): RI/XI arrive already at bits 62-63;
 * they are accepted only when enabled via PageGrain.XIE.
 */
void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
663 #endif
664
/*
 * MTC0 TCStatus: write the current TC's TCStatus through the per-CPU
 * writable-bit mask, then mirror the shared fields into Status/EntryHi.
 */
void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}
675
/*
 * MTTC0 TCStatus: write TCStatus of the TC selected by TargTC and
 * resync the shared Status/EntryHi fields of its VPE.
 * NOTE(review): unlike helper_mtc0_tcstatus(), the value is stored
 * without applying CP0_TCStatus_rw_bitmask.
 */
void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &tgt);
    int32_t *slot = (tgt == other->current_tc)
                    ? &other->active_tc.CP0_TCStatus
                    : &other->tcs[tgt].CP0_TCStatus;

    *slot = arg1;
    sync_c0_tcstatus(other, tgt, arg1);
}
688
/*
 * MTC0 TCBind: TBE is always writable; CurVPE only while the processor
 * is in configuration state (MVPControl.VPC set).
 */
void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}
700
/*
 * MTTC0 TCBind: same writable-bit rules as helper_mtc0_tcbind(), but
 * applied to the TC selected by VPEControl.TargTC.
 */
void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}
719
/*
 * MTC0 TCRestart: set the current TC's restart PC, clear its TDS
 * (disabled) status bit, and cancel any pending LL/SC sequence.
 */
void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0;
    env->lladdr = 0;
    /* MIPS16 not implemented. */
}
728
/*
 * MTTC0 TCRestart: set the restart PC of the TC selected by TargTC,
 * clear its TDS status bit and cancel the target VPE's LL/SC sequence.
 */
void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &tgt);

    if (tgt == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    } else {
        other->tcs[tgt].PC = arg1;
        other->tcs[tgt].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    }
    /* Common to both paths: drop any pending LL/SC on the target VPE. */
    other->CP0_LLAddr = 0;
    other->lladdr = 0;
    /* MIPS16 not implemented. */
}
748
/*
 * MTC0 TCHalt: store the halt bit for the current TC and put the TC
 * to sleep or wake it accordingly.
 */
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = env_archcpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    /* TODO: Halt TC / Restart (if allocated+active) TC. */
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}
762
/*
 * MTTC0 TCHalt: store the halt value for the TC selected by TargTC and
 * put the owning CPU to sleep or wake it based on bit 0.
 * NOTE(review): unlike helper_mtc0_tchalt(), arg1 is stored unmasked
 * here — confirm this matches the intended H-bit-only semantics.
 */
void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    MIPSCPU *other_cpu = env_archcpu(other);

    /* TODO: Halt TC / Restart (if allocated+active) TC. */

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCHalt = arg1;
    } else {
        other->tcs[other_tc].CP0_TCHalt = arg1;
    }

    if (arg1 & 1) {
        mips_tc_sleep(other_cpu, other_tc);
    } else {
        mips_tc_wake(other_cpu, other_tc);
    }
}
783
/* MTC0 TCContext: write the current TC's context register verbatim. */
void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}
788
/* MTTC0 TCContext: write TCContext of the TC selected by TargTC. */
void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &tgt);
    target_ulong *slot = (tgt == other->current_tc)
                         ? &other->active_tc.CP0_TCContext
                         : &other->tcs[tgt].CP0_TCContext;

    *slot = arg1;
}
800
/* MTC0 TCSchedule: write the current TC's scheduling register verbatim. */
void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}
805
/* MTTC0 TCSchedule: write TCSchedule of the TC selected by TargTC. */
void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &tgt);
    target_ulong *slot = (tgt == other->current_tc)
                         ? &other->active_tc.CP0_TCSchedule
                         : &other->tcs[tgt].CP0_TCSchedule;

    *slot = arg1;
}
817
/* MTC0 TCScheFBack: write the current TC's schedule-feedback register. */
void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}
822
/* MTTC0 TCScheFBack: write TCScheFBack of the TC selected by TargTC. */
void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &tgt);
    target_ulong *slot = (tgt == other->current_tc)
                         ? &other->active_tc.CP0_TCScheFBack
                         : &other->tcs[tgt].CP0_TCScheFBack;

    *slot = arg1;
}
834
/*
 * MTC0 EntryLo1: same masking/RI-XI relocation as helper_mtc0_entrylo0(),
 * applied to EntryLo1.
 */
void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}
842
843 #if defined(TARGET_MIPS64)
/*
 * DMTC0 EntryLo1 (64-bit): same RI/XI gating as helper_dmtc0_entrylo0(),
 * applied to EntryLo1.
 */
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
849 #endif
850
/* MTC0 Context: only PTEBase (above bit 22) is writable; BadVPN2 is kept. */
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}
855
/*
 * MTC0 MemoryMapID: store the new MMID; a changed MMID invalidates
 * QEMU's cached translations, so flush the TLB in that case.
 */
void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
{
    int32_t next = (int32_t) arg1;

    if (env->CP0_MemoryMapID != next) {
        env->CP0_MemoryMapID = next;
        cpu_mips_tlb_flush(env);
    }
}
866
compute_pagemask(uint32_t val)867 uint32_t compute_pagemask(uint32_t val)
868 {
869 /* Don't care MASKX as we don't support 1KB page */
870 uint32_t mask = extract32(val, CP0PM_MASK, 16);
871 int maskbits = cto32(mask);
872
873 /* Ensure no more set bit after first zero, and maskbits even. */
874 if ((mask >> maskbits) == 0 && maskbits % 2 == 0) {
875 return mask << CP0PM_MASK;
876 } else {
877 /* When invalid, set to default target page size. */
878 return 0;
879 }
880 }
881
/* MTC0 PageMask: store the validated/normalized mask value. */
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_PageMask = compute_pagemask(arg1);
}
886
/*
 * MTC0 PageGrain: write through the per-CPU writable-bit mask, then
 * recompute hflags and the physical address mask which depend on it.
 */
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
    compute_hflags(env);
    restore_pamask(env);
}
896
/* MTC0 SegCtl0: segmentation change invalidates cached translations. */
void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
    tlb_flush(cs);
}
904
/* MTC0 SegCtl1: segmentation change invalidates cached translations. */
void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
    tlb_flush(cs);
}
912
/* MTC0 SegCtl2: segmentation change invalidates cached translations. */
void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
    tlb_flush(cs);
}
920
/*
 * MTC0 PWField: configure the page-walker field indices.  On R6,
 * directory/table index fields written with a value below 12 are
 * dropped from the write (the old value is kept), and certain
 * PTEI/PTEW values cause that field to be restored as well.
 */
void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    uint64_t mask = 0x3F3FFFFFFFULL;
    uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
    uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;

    if ((env->insn_flags & ISA_MIPS_R6)) {
        /* Drop each field holding an illegal (<12) value from the mask. */
        if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_BDI);
        }
        if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_GDI);
        }
        if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_UDI);
        }
        if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_MDI);
        }
        if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_PTI);
        }
    }
    env->CP0_PWField = arg1 & mask;

    /* Restore the previous PTEI when the new value is not acceptable. */
    if ((new_ptei >= 32) ||
        ((env->insn_flags & ISA_MIPS_R6) &&
         (new_ptei == 0 || new_ptei == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
                           (old_ptei << CP0PF_PTEI);
    }
#else
    uint32_t mask = 0x3FFFFFFF;
    uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;

    if ((env->insn_flags & ISA_MIPS_R6)) {
        /* Drop each field holding an illegal (<12) value from the mask. */
        if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_GDW);
        }
        if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_UDW);
        }
        if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_MDW);
        }
        if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_PTW);
        }
    }
    env->CP0_PWField = arg1 & mask;

    /* Restore the previous PTEW when the new value is not acceptable. */
    if ((new_ptew >= 32) ||
        ((env->insn_flags & ISA_MIPS_R6) &&
         (new_ptew == 0 || new_ptew == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
                           (old_ptew << CP0PF_PTEW);
    }
#endif
}
982
/* MTC0 PWSize: mask off the bits that are not implemented as writable. */
void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
#else
    env->CP0_PWSize = arg1 & 0x3FFFFFFF;
#endif
}
991
/*
 * MTC0 Wired: on R6 an out-of-range value is ignored; before R6 the
 * value wraps modulo the TLB size.
 */
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ISA_MIPS_R6) {
        if (arg1 < env->tlb->nb_tlb) {
            env->CP0_Wired = arg1;
        }
        return;
    }
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}
1002
/*
 * Write CP0 PWCtl (page-walker control).
 * On MIPS64 some bits (0xC0) are preserved from the current value and
 * PWEn is forced off since QEMU does not walk page tables in hardware.
 */
void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    /* PWEn = 0. Hardware page table walking is not implemented. */
    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
#else
    env->CP0_PWCtl = (arg1 & 0x800000FF);
#endif
}
1012
/*
 * Write CP0 SRSConf0. The register is sticky: writable bits (per the
 * per-CPU rw bitmask) can be set but never cleared by software.
 */
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong set_bits = arg1 & env->CP0_SRSConf0_rw_bitmask;

    env->CP0_SRSConf0 = env->CP0_SRSConf0 | set_bits;
}
1017
/*
 * Write CP0 SRSConf1. Sticky semantics: writable bits can only be set,
 * never cleared (see helper for SRSConf0).
 */
void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong set_bits = arg1 & env->CP0_SRSConf1_rw_bitmask;

    env->CP0_SRSConf1 = env->CP0_SRSConf1 | set_bits;
}
1022
/*
 * Write CP0 SRSConf2. Sticky semantics: writable bits can only be set,
 * never cleared.
 */
void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong set_bits = arg1 & env->CP0_SRSConf2_rw_bitmask;

    env->CP0_SRSConf2 = env->CP0_SRSConf2 | set_bits;
}
1027
/*
 * Write CP0 SRSConf3. Sticky semantics: writable bits can only be set,
 * never cleared.
 */
void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong set_bits = arg1 & env->CP0_SRSConf3_rw_bitmask;

    env->CP0_SRSConf3 = env->CP0_SRSConf3 | set_bits;
}
1032
/*
 * Write CP0 SRSConf4. Sticky semantics: writable bits can only be set,
 * never cleared.
 */
void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong set_bits = arg1 & env->CP0_SRSConf4_rw_bitmask;

    env->CP0_SRSConf4 = env->CP0_SRSConf4 | set_bits;
}
1037
/*
 * Write CP0 HWREna (RDHWR enable mask).
 * Bits 0-3 are always writable; bit 4 becomes writable when performance
 * counters are present on an R6 core, bit 5 on any R6 core, and bit 29
 * when the UserLocal register exists (Config3.ULRI).
 */
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0x0000000F;

    if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
        (env->insn_flags & ISA_MIPS_R6)) {
        mask |= (1 << 4);
    }
    if (env->insn_flags & ISA_MIPS_R6) {
        mask |= (1 << 5);
    }
    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
        mask |= (1 << 29);

        /* Mirror the ULR-enable bit into hflags for fast RDHWR checks. */
        if (arg1 & (1 << 29)) {
            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
        } else {
            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
        }
    }

    env->CP0_HWREna = arg1 & mask;
}
1061
/* Write CP0 Count; delegates to the timer code to rebase the counter. */
void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
1066
/*
 * Write CP0 EntryHi.
 * Builds the writable-bit mask (VPN2 + ASID, optionally EHINV), guards
 * the MIPS64 R6 EntryHi.R field against reserved values, propagates the
 * value to the MT thread contexts, and flushes QEMU's TLB if the ASID
 * changed.
 */
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    /* EHINV is writable only when Config4.IE >= 2 (TLB invalidate support). */
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS_R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    /* Merge the writable bits of the new value over the preserved bits. */
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (ase_mt_available(env)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & env->CP0_EntryHi_ASID_mask) !=
        (val & env->CP0_EntryHi_ASID_mask)) {
        tlb_flush(env_cpu(env));
    }
}
1101
/*
 * MT cross-TC write of EntryHi: store the raw value into the TC selected
 * by VPEControl.TargTC and resynchronize that TC's view of the register.
 */
void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}
1110
/* Write CP0 Compare; the timer code re-arms the count/compare interrupt. */
void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
1115
/*
 * Write CP0 Status via cpu_mips_store_status(), then (when exec logging
 * is enabled) trace the old/new values, the pending-interrupt overlap
 * with Cause, and the resulting privilege mode.
 */
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t val, old;

    old = env->CP0_Status;
    cpu_mips_store_status(env, arg1);
    val = env->CP0_Status;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                 old, old & env->CP0_Cause & CP0Ca_IP_mask,
                 val, val & env->CP0_Cause & CP0Ca_IP_mask,
                 env->CP0_Cause);
        /* Decode the MMU index into a human-readable mode suffix. */
        switch (mips_env_mmu_index(env)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM:
            qemu_log(", UM\n");
            break;
        case MIPS_HFLAG_SM:
            qemu_log(", SM\n");
            break;
        case MIPS_HFLAG_KM:
            qemu_log("\n");
            break;
        default:
            cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
            break;
        }
    }
}
1148
/*
 * MT cross-TC write of Status for the TC selected by VPEControl.TargTC.
 * Bits 0xf1000018 are excluded from the rw bitmask here — those bits are
 * not propagated by a cross-TC write.
 */
void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
    sync_c0_status(env, other, other_tc);
}
1158
/*
 * Write CP0 IntCtl. Only the VS field (bits 9..5) is software-writable;
 * everything else is preserved.
 */
void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    const uint32_t mask = 0x000003e0;

    env->CP0_IntCtl = (env->CP0_IntCtl & ~mask) | (arg1 & mask);
}
1163
/* Write CP0 SRSCtl. Only the ESS and PSS shadow-set fields are writable. */
void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
1169
/* Write CP0 Cause; the store helper handles side effects (e.g. IP bits). */
void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_cause(env, arg1);
}
1174
/* MT cross-TC write of Cause for the TC selected by VPEControl.TargTC. */
void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    cpu_mips_store_cause(other, arg1);
}
1182
/* MT cross-TC read of EPC from the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}
1190
/* MT cross-TC read of EBase from the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}
1198
/*
 * Write CP0 EBase. The base field (bits 29..12) plus the WG bit are
 * writable; when WG is being set, the upper address bits also become
 * writable so the exception base can move out of kseg0.
 */
void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
}
1207
/*
 * MT cross-TC write of EBase; same write-gate (WG) widening rule as
 * helper_mtc0_ebase, applied to the TC selected by VPEControl.TargTC.
 */
void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
}
1218
/*
 * MT cross-TC read of Config<idx> from the TC selected by
 * VPEControl.TargTC. Reserved selects (4, 5, and anything above 7)
 * read as zero.
 */
target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved. */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}
1237
/* Write CP0 Config0. Only the K0 cacheability field (bits 2:0) is writable. */
void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}
1242
/*
 * Write CP0 Config2. arg1 is deliberately ignored: the writable fields
 * describe secondary/tertiary caches, which QEMU does not model, so the
 * register only keeps its read-only bits.
 */
void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
1248
/*
 * Write CP0 Config3. Only ISAOnExc is writable, and only when the CPU
 * supports microMIPS (where the bit selects the ISA used on exception).
 */
void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ASE_MICROMIPS) {
        env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
                           (arg1 & (1 << CP0C3_ISA_ON_EXC));
    }
}
1256
/* Write CP0 Config4 through the per-CPU writable-bit mask. */
void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
                       (arg1 & env->CP0_Config4_rw_bitmask);
}
1262
/*
 * Write CP0 Config5 through the per-CPU writable-bit mask, then derive
 * the EntryHi ASID mask: zero when MemoryMapID (Config5.MI) is enabled,
 * 10 bits when extended ASIDs (Config4.AE) are present, else 8 bits.
 * hflags are recomputed since Config5 affects execution modes.
 */
void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
                       (arg1 & env->CP0_Config5_rw_bitmask);
    env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
            0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
    compute_hflags(env);
}
1271
/*
 * Write CP0 LLAddr. The architectural value is shifted by the per-CPU
 * LLAddr_shift before masking with the writable-bit mask.
 */
void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
}
1278
/*
 * Mask of writable MAAR bits: bit 63 plus the physical-address field
 * derived from the CPU's PAMask (4 KiB granularity) and the low two
 * control bits.
 */
#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
1281
/* Write the MAAR entry currently selected by CP0 MAARI. */
void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
}
1286
/*
 * Write the high 32 bits of the selected MAAR entry (MTHC0 path on
 * 32-bit cores); the low half is preserved.
 */
void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] =
        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
}
1293
/*
 * Write CP0 MAARI (MAAR index). Writing all-ones selects the highest
 * implemented index; other out-of-range values leave MAARI unchanged.
 */
void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
{
    int index = arg1 & 0x3f;
    if (index == 0x3f) {
        /*
         * Software may write all ones to INDEX to determine the
         * maximum value supported.
         */
        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
    } else if (index < MIPS_MAAR_MAX) {
        env->CP0_MAARI = index;
    }
    /*
     * Other than the all ones, if the value written is not supported,
     * then INDEX is unchanged from its previous value.
     */
}
1311
/*
 * Write CP0 WatchLo[sel]. The low three bits (I/R/W enables) are kept
 * clear because watch exceptions are not implemented.
 */
void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /*
     * Watch exceptions for instructions, data loads, data stores
     * not implemented.
     */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}
1320
/*
 * Write CP0 WatchHi[sel]. The M bit is read-only and preserved; the
 * ASID field width comes from the current EntryHi ASID mask, and the
 * upper 32 bits hold the MMID when Config5.MI is enabled. The final
 * statement applies write-one-to-clear semantics to the low I/R/W bits.
 */
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
    uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */
    if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
        mask |= 0xFFFFFFFF00000000ULL; /* MMID */
    }
    env->CP0_WatchHi[sel] = m_bit | (arg1 & mask);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
1331
/* Write the high 32 bits of WatchHi[sel]; the low half is preserved. */
void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
                            (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
}
1337
/*
 * Write CP0 XContext. Only the PTEBase portion (above the BadVPN2/R
 * fields, whose width depends on SEGBITS) is software-writable.
 */
void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}
1343
/* Write CP0 Framemask verbatim (no field masking modelled yet). */
void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
1348
/*
 * Write CP0 Debug through a fixed writable mask, and mirror the DM
 * (debug mode) bit into hflags so translation sees the mode change.
 */
void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM)) {
        env->hflags |= MIPS_HFLAG_DM;
    } else {
        env->hflags &= ~MIPS_HFLAG_DM;
    }
}
1358
/*
 * MT cross-TC write of Debug. The SSt/Halt bits go to the target TC's
 * per-TC Debug_tcstatus; the remaining bits update the shared Debug
 * register of the target VPE.
 */
void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_Debug_tcstatus = val;
    } else {
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    }
    other->CP0_Debug = (other->CP0_Debug &
                        ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                       (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1375
/* Write CP0 Performance0 control; only the low control bits are writable. */
void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}
1380
/*
 * Write CP0 ErrCtl (WST/SPR/ITC bits; ITC only when an ITC tag exists).
 * When ITC alone is set, CACHE instructions address the ITC storage, so
 * the corresponding hflag is raised for the translator.
 */
void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
{
    int32_t wst = arg1 & (1 << CP0EC_WST);
    int32_t spr = arg1 & (1 << CP0EC_SPR);
    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;

    env->CP0_ErrCtl = wst | spr | itc;

    if (itc && !wst && !spr) {
        env->hflags |= MIPS_HFLAG_ITC_CACHE;
    } else {
        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
    }
}
1395
/*
 * Write CP0 TagLo. In ITC-cache mode all bits pass through (the later
 * ITC tag write enforces read-only bits); otherwise a fixed mask applies.
 */
void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
        /*
         * If CACHE instruction is configured for ITC tags then make all
         * CP0.TagLo bits writable. The actual write to ITC Configuration
         * Tag will take care of the read-only bits.
         */
        env->CP0_TagLo = arg1;
    } else {
        env->CP0_TagLo = arg1 & 0xFFFFFCF6;
    }
}
1409
/* Write CP0 DataLo verbatim (no field masking modelled yet). */
void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}
1414
/* Write CP0 TagHi verbatim (no field masking modelled yet). */
void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}
1419
/* Write CP0 DataHi verbatim (no field masking modelled yet). */
void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
1424
1425 /* MIPS MT functions */
helper_mftgpr(CPUMIPSState * env,uint32_t sel)1426 target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
1427 {
1428 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1429 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1430
1431 if (other_tc == other->current_tc) {
1432 return other->active_tc.gpr[sel];
1433 } else {
1434 return other->tcs[other_tc].gpr[sel];
1435 }
1436 }
1437
helper_mftlo(CPUMIPSState * env,uint32_t sel)1438 target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
1439 {
1440 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1441 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1442
1443 if (other_tc == other->current_tc) {
1444 return other->active_tc.LO[sel];
1445 } else {
1446 return other->tcs[other_tc].LO[sel];
1447 }
1448 }
1449
helper_mfthi(CPUMIPSState * env,uint32_t sel)1450 target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
1451 {
1452 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1453 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1454
1455 if (other_tc == other->current_tc) {
1456 return other->active_tc.HI[sel];
1457 } else {
1458 return other->tcs[other_tc].HI[sel];
1459 }
1460 }
1461
helper_mftacx(CPUMIPSState * env,uint32_t sel)1462 target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
1463 {
1464 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1465 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1466
1467 if (other_tc == other->current_tc) {
1468 return other->active_tc.ACX[sel];
1469 } else {
1470 return other->tcs[other_tc].ACX[sel];
1471 }
1472 }
1473
helper_mftdsp(CPUMIPSState * env)1474 target_ulong helper_mftdsp(CPUMIPSState *env)
1475 {
1476 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1477 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1478
1479 if (other_tc == other->current_tc) {
1480 return other->active_tc.DSPControl;
1481 } else {
1482 return other->tcs[other_tc].DSPControl;
1483 }
1484 }
1485
/*
 * MT: write GPR 'sel' of the thread context selected by
 * VPEControl.TargTC (active_tc for the running TC, tcs[] otherwise).
 */
void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int tc_index = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tc_env = mips_cpu_map_tc(env, &tc_index);

    if (tc_index != tc_env->current_tc) {
        tc_env->tcs[tc_index].gpr[sel] = arg1;
    } else {
        tc_env->active_tc.gpr[sel] = arg1;
    }
}
1497
/* MT: write LO[sel] of the thread context selected by VPEControl.TargTC. */
void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int tc_index = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tc_env = mips_cpu_map_tc(env, &tc_index);

    if (tc_index != tc_env->current_tc) {
        tc_env->tcs[tc_index].LO[sel] = arg1;
    } else {
        tc_env->active_tc.LO[sel] = arg1;
    }
}
1509
/* MT: write HI[sel] of the thread context selected by VPEControl.TargTC. */
void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int tc_index = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tc_env = mips_cpu_map_tc(env, &tc_index);

    if (tc_index != tc_env->current_tc) {
        tc_env->tcs[tc_index].HI[sel] = arg1;
    } else {
        tc_env->active_tc.HI[sel] = arg1;
    }
}
1521
/* MT: write ACX[sel] of the thread context selected by VPEControl.TargTC. */
void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int tc_index = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tc_env = mips_cpu_map_tc(env, &tc_index);

    if (tc_index != tc_env->current_tc) {
        tc_env->tcs[tc_index].ACX[sel] = arg1;
    } else {
        tc_env->active_tc.ACX[sel] = arg1;
    }
}
1533
/* MT: write DSPControl of the thread context selected by VPEControl.TargTC. */
void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    int tc_index = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tc_env = mips_cpu_map_tc(env, &tc_index);

    if (tc_index != tc_env->current_tc) {
        tc_env->tcs[tc_index].DSPControl = arg1;
    } else {
        tc_env->active_tc.DSPControl = arg1;
    }
}
1545
1546 /* MIPS MT functions */
/* DMT (disable multi-threading): not implemented, returns 0. */
target_ulong helper_dmt(void)
{
    /* TODO */
    return 0;
}
1552
/* EMT (enable multi-threading): not implemented, returns 0. */
target_ulong helper_emt(void)
{
    /* TODO */
    return 0;
}
1558
/*
 * DVPE: disable all VPEs other than the one executing the instruction.
 * Returns the previous MVPControl value, as the architecture requires.
 */
target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        /* Turn off all VPEs except the one executing the dvpe. */
        if (&other_cpu->env != env) {
            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
    }
    return prev;
}
1574
/*
 * EVPE: re-enable and wake all other VPEs, except those halted in WFI
 * (their sleep is an explicit wait for interrupt, not a DVPE stop).
 * Returns the previous MVPControl value.
 */
target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if (&other_cpu->env != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up. */
        }
    }
    return prev;
}
1593
1594 /* R6 Multi-threading */
/*
 * R6 DVP: put all virtual processors except the current one to sleep
 * and set VPControl.DIS. A no-op if VPs are already disabled.
 * Returns the previous VPControl value.
 */
target_ulong helper_dvp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            /* Turn off all VPs except the one executing the dvp. */
            if (&other_cpu->env != env) {
                mips_vpe_sleep(other_cpu);
            }
        }
        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
    }
    return prev;
}
1612
/*
 * R6 EVP: wake all other virtual processors (except those halted in
 * WFI) and clear VPControl.DIS. A no-op if VPs are already enabled.
 * Returns the previous VPControl value.
 */
target_ulong helper_evp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
                /*
                 * If the VP is WFI, don't disturb its sleep.
                 * Otherwise, wake it up.
                 */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    return prev;
}
1633