/*
 *  Helpers for emulation of CP0-related MIPS instructions.
 *
 *  Copyright (C) 2004-2005  Jocelyn Mayer
 *  Copyright (C) 2020  Wave Computing, Inc.
 *  Copyright (C) 2020  Aleksandar Markovic <amarkovic@wavecomp.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "hw/misc/mips_itu.h"


/* SMP helpers.  */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    /*
     * If the VPE is halted but otherwise active, it means it's waiting for
     * an interrupt.
     */
    return cpu->halted && mips_vpe_active(env);
}

static bool mips_vp_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    return cpu->halted && mips_vp_active(env);
}

static inline void mips_vpe_wake(MIPSCPU *c)
{
    /*
     * Don't set ->halted = 0 directly, let it be done via cpu_has_work
     * because there might be other conditions that state that c should
     * be sleeping.
     */
    qemu_mutex_lock_iothread();
    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
    qemu_mutex_unlock_iothread();
}

static inline void mips_vpe_sleep(MIPSCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /*
     * The VPE was shut off, really go to bed.
     * Reset any old _WAKE requests.
     */
    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
}

static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule.  */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
        mips_vpe_wake(cpu);
    }
}

static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule.  */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(cpu);
    }
}

/**
 * mips_cpu_map_tc:
 * @env: CPU from which mapping is performed.
 * @tc: Should point to an int with the value of the global TC index.
 *
 * This function will transform @tc into a local index within the
 * returned #CPUMIPSState.
 */

/*
 * FIXME: This code assumes that all VPEs have the same number of TCs,
 *        which depends on runtime setup. Can probably be fixed by
 *        walking the list of CPUMIPSStates.
 */
static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
{
    MIPSCPU *cpu;
    CPUState *cs;
    CPUState *other_cs;
    int vpe_idx;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    cs = env_cpu(env);
    vpe_idx = tc_idx / cs->nr_threads;
    *tc = tc_idx % cs->nr_threads;
    other_cs = qemu_get_cpu(vpe_idx);
    if (other_cs == NULL) {
        return env;
    }
    cpu = MIPS_CPU(other_cs);
    return &cpu->env;
}

/*
 * The per VPE CP0_Status register shares some fields with the per TC
 * CP0_TCStatus registers. These fields are wired to the same registers,
 * so changes to either of them should be reflected on both registers.
 *
 * Also, EntryHi shares the bottom 8-bit ASID field with TCStatus.
 *
 * These helper calls synchronize the registers for a given CPU.
 */

/*
 * Called for updates to CP0_Status.  Defined in "cpu.h" for gdbstub.c.
 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
 *                                   int tc);
 */

/* Called for updates to CP0_TCStatus.  */
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
                             target_ulong v)
{
    uint32_t status;
    uint32_t tcu, tmx, tasid, tksu;
    uint32_t mask = ((1U << CP0St_CU3)
                       | (1 << CP0St_CU2)
                       | (1 << CP0St_CU1)
                       | (1 << CP0St_CU0)
                       | (1 << CP0St_MX)
                       | (3 << CP0St_KSU));

    tcu = (v >> CP0TCSt_TCU0) & 0xf;
    tmx = (v >> CP0TCSt_TMX) & 0x1;
    tasid = v & cpu->CP0_EntryHi_ASID_mask;
    tksu = (v >> CP0TCSt_TKSU) & 0x3;

    status = tcu << CP0St_CU0;
    status |= tmx << CP0St_MX;
    status |= tksu << CP0St_KSU;

    cpu->CP0_Status &= ~mask;
    cpu->CP0_Status |= status;

    /* Sync the TASID with EntryHi.  */
    cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
    cpu->CP0_EntryHi |= tasid;

    compute_hflags(cpu);
}

/* Called for updates to CP0_EntryHi.  */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid, v = cpu->CP0_EntryHi;

    asid = v & cpu->CP0_EntryHi_ASID_mask;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
    *tcst |= asid;
}

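/*
 * Return a pseudo-random TLB index in the non-wired range
 * [CP0_Wired, nb_tlb), avoiding returning the same index twice in a row.
 * Used to emulate reads of the CP0 Random register.
 */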
/* XXX: do not use a global */
uint32_t cpu_mips_get_random(CPUMIPSState *env)
{
    static uint32_t seed = 1;
    static uint32_t prev_idx;
    uint32_t idx;
    uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;

    if (nb_rand_tlb == 1) {
        return env->tlb->nb_tlb - 1;
    }

    /* Don't return the same value twice in a row, so get another value. */
    do {
        /*
         * Use the simple linear congruential generator from the
         * ISO/IEC 9899 (C) standard.
         */
        seed = 1103515245 * seed + 12345;
        idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
    } while (idx == prev_idx);
    prev_idx = idx;
    return idx;
}

/* CP0 helpers */
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf1;
}

target_ulong helper_mfc0_random(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_random(env);
}

target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCStatus;
}

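/*
 * The mftc0/mttc0 helpers access the register of the TC selected by
 * VPEControl.TargTC, which mips_cpu_map_tc() resolves to a (possibly
 * different) VPE and a TC index local to that VPE.
 */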
target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCStatus;
    } else {
        return other->tcs[other_tc].CP0_TCStatus;
    }
}

target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCBind;
}

target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCBind;
    } else {
        return other->tcs[other_tc].CP0_TCBind;
    }
}

target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.PC;
    } else {
        return other->tcs[other_tc].PC;
    }
}

target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCHalt;
    } else {
        return other->tcs[other_tc].CP0_TCHalt;
    }
}

target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCContext;
    } else {
        return other->tcs[other_tc].CP0_TCContext;
    }
}

target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCSchedule;
    } else {
        return other->tcs[other_tc].CP0_TCSchedule;
    }
}

target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCScheFBack;
    } else {
        return other->tcs[other_tc].CP0_TCScheFBack;
    }
}

target_ulong helper_mfc0_count(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_count(env);
}

target_ulong helper_mfc0_saar(CPUMIPSState *env)
{
    if ((env->CP0_SAARI & 0x3f) < 2) {
        return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f];
    }
    return 0;
}

target_ulong helper_mfhc0_saar(CPUMIPSState *env)
{
    if ((env->CP0_SAARI & 0x3f) < 2) {
        return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32;
    }
    return 0;
}

target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EntryHi;
}

target_ulong helper_mftc0_cause(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_Cause;
}

target_ulong helper_mftc0_status(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_Status;
}

target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
    return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
}

target_ulong helper_mfc0_maar(CPUMIPSState *env)
{
    return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
}

target_ulong helper_mfhc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI] >> 32;
}

target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t) env->CP0_WatchHi[sel];
}

target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel] >> 32;
}

target_ulong helper_mfc0_debug(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM) {
        t0 |= 1 << CP0DB_DM;
    }

    return t0;
}

target_ulong helper_mftc0_debug(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    } else {
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
    }

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

#if defined(TARGET_MIPS64)
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
    return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI];
}

target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}

target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

target_ulong helper_dmfc0_saar(CPUMIPSState *env)
{
    if ((env->CP0_SAARI & 0x3f) < 2) {
        return env->CP0_SAAR[env->CP0_SAARI & 0x3f];
    }
    return 0;
}
#endif /* TARGET_MIPS64 */

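/*
 * Index register writes: the new value is only accepted if it indexes a
 * valid TLB entry.  The probe-failure bit (P, bit 31) is preserved; on
 * Release 6 software may additionally set it.
 */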
void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t index_p = env->CP0_Index & 0x80000000;
    uint32_t tlb_index = arg1 & 0x7fffffff;
    if (tlb_index < env->tlb->nb_tlb) {
        if (env->insn_flags & ISA_MIPS_R6) {
            index_p |= arg1 & 0x80000000;
        }
        env->CP0_Index = index_p | tlb_index;
    }
}

void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    }
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0MVPCo_STLB);
    }
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable shared TLB, enable/disable VPEs. */

    env->mvp->CP0_MVPControl = newval;
}

void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /*
     * Yield scheduler intercept not implemented.
     * Gating storage scheduler intercept not implemented.
     */

    /* TODO: Enable/disable TCs. */

    env->CP0_VPEControl = newval;
}

void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable TCs.  */

    other->CP0_VPEControl = newval;
}

target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    /* FIXME: Mask away return zero on read bits.  */
    return other->CP0_VPEControl;
}

target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_VPEConf0;
}

void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
            mask |= (0xff << CP0VPEC0_XTC);
        }
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL. */

    env->CP0_VPEConf0 = newval;
}

void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask = 0;
    uint32_t newval;

    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL.  */
    other->CP0_VPEConf0 = newval;
}

void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    }
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    /* TODO: Handle FPU (CP1) binding. */

    env->CP0_VPEConf1 = newval;
}

void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

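/*
 * EntryLo writes: mask the PFN/C/D/V/G fields against the supported
 * physical address range and, when enabled by PageGrain.RIE/XIE, relocate
 * the RI/XI bits to their position in the internal 64-bit EntryLo value.
 */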
#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)

void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)

void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif

void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}

void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCStatus = arg1;
    } else {
        other->tcs[other_tc].CP0_TCStatus = arg1;
    }
    sync_c0_tcstatus(other, other_tc, arg1);
}

void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}

void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0;
    env->lladdr = 0;
    /* MIPS16 not implemented. */
}

void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->CP0_LLAddr = 0;
        other->lladdr = 0;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->CP0_LLAddr = 0;
        other->lladdr = 0;
        /* MIPS16 not implemented. */
    }
}

void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = env_archcpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    /* TODO: Halt TC / Restart (if allocated+active) TC. */
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}

void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    MIPSCPU *other_cpu = env_archcpu(other);

    /* TODO: Halt TC / Restart (if allocated+active) TC. */

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCHalt = arg1;
    } else {
        other->tcs[other_tc].CP0_TCHalt = arg1;
    }

    if (arg1 & 1) {
        mips_tc_sleep(other_cpu, other_tc);
    } else {
        mips_tc_wake(other_cpu, other_tc);
    }
}

void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCContext = arg1;
    } else {
        other->tcs[other_tc].CP0_TCContext = arg1;
    }
}

void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCSchedule = arg1;
    } else {
        other->tcs[other_tc].CP0_TCSchedule = arg1;
    }
}

void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCScheFBack = arg1;
    } else {
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
    }
}

void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif

void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
{
    int32_t old;
    old = env->CP0_MemoryMapID;
    env->CP0_MemoryMapID = (int32_t) arg1;
    /* If the MemoryMapID changes, flush qemu's TLB.  */
    if (old != env->CP0_MemoryMapID) {
        cpu_mips_tlb_flush(env);
    }
}

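/*
 * Validate a PageMask write: the Mask field must be a contiguous run of
 * ones starting at bit 0 and must not describe a page smaller than QEMU's
 * target page size.  Invalid values fall back to the mask for the default
 * target page size.
 */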
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
{
    uint32_t mask;
    int maskbits;

    /* MASKX is ignored as we don't support 1KB pages. */
    mask = extract32((uint32_t)arg1, CP0PM_MASK, 16);
    maskbits = cto32(mask);

    /* Ensure there are no more set bits after the first zero. */
    if ((mask >> maskbits) != 0) {
        goto invalid;
    }
    /* We don't support VTLB entries smaller than the target page. */
    if ((maskbits + TARGET_PAGE_BITS_MIN) < TARGET_PAGE_BITS) {
        goto invalid;
    }
    env->CP0_PageMask = mask << CP0PM_MASK;

    return;

invalid:
    /* When invalid, set to default target page size. */
    mask = (~TARGET_PAGE_MASK >> TARGET_PAGE_BITS_MIN);
    env->CP0_PageMask = mask << CP0PM_MASK;
}

void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    update_pagemask(env, arg1, &env->CP0_PageMask);
}

void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
    compute_hflags(env);
    restore_pamask(env);
}

void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
    tlb_flush(cs);
}

void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
    tlb_flush(cs);
}

void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
    tlb_flush(cs);
}

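/*
 * PWField configures the bit positions of the fields used by the hardware
 * page table walker.  On Release 6, index values below 12 are not accepted
 * for the directory/PTE index fields, and reserved PTEI/PTEW values leave
 * the previous PTEI/PTEW setting in place.
 */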
void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    uint64_t mask = 0x3F3FFFFFFFULL;
    uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
    uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;

    if ((env->insn_flags & ISA_MIPS_R6)) {
        if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_BDI);
        }
        if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_GDI);
        }
        if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_UDI);
        }
        if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_MDI);
        }
        if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_PTI);
        }
    }
    env->CP0_PWField = arg1 & mask;

    if ((new_ptei >= 32) ||
            ((env->insn_flags & ISA_MIPS_R6) &&
                    (new_ptei == 0 || new_ptei == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
                (old_ptei << CP0PF_PTEI);
    }
#else
    uint32_t mask = 0x3FFFFFFF;
    uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;

    if ((env->insn_flags & ISA_MIPS_R6)) {
        if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_GDW);
        }
        if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_UDW);
        }
        if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_MDW);
        }
        if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_PTW);
        }
    }
    env->CP0_PWField = arg1 & mask;

    if ((new_ptew >= 32) ||
            ((env->insn_flags & ISA_MIPS_R6) &&
                    (new_ptew == 0 || new_ptew == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
                (old_ptew << CP0PF_PTEW);
    }
#endif
}

void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
#else
    env->CP0_PWSize = arg1 & 0x3FFFFFFF;
#endif
}

void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ISA_MIPS_R6) {
        if (arg1 < env->tlb->nb_tlb) {
            env->CP0_Wired = arg1;
        }
    } else {
        env->CP0_Wired = arg1 % env->tlb->nb_tlb;
    }
}

void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    /* PWEn = 0. Hardware page table walking is not implemented. */
    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
#else
    env->CP0_PWCtl = (arg1 & 0x800000FF);
#endif
}

void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

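/*
 * HWREna controls which hardware registers are readable via RDHWR from
 * user mode.  Bit 29 (UserLocal) is only writable when Config3.ULRI is
 * set, and its value is mirrored into the ULR hflag used by RDHWR.
 */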
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0x0000000F;

    if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
        (env->insn_flags & ISA_MIPS_R6)) {
        mask |= (1 << 4);
    }
    if (env->insn_flags & ISA_MIPS_R6) {
        mask |= (1 << 5);
    }
    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
        mask |= (1 << 29);

        if (arg1 & (1 << 29)) {
            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
        } else {
            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
        }
    }

    env->CP0_HWREna = arg1 & mask;
}

void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}

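/*
 * SAARI selects which of the two implemented SAAR registers is accessed;
 * writes to SAAR[0] reconfigure the ITU (ITC) block if it is present.
 */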
void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = arg1 & 0x3f;
    if (target <= 1) {
        env->CP0_SAARI = target;
    }
}

void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = env->CP0_SAARI & 0x3f;
    if (target < 2) {
        env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL;
        switch (target) {
        case 0:
            if (env->itu) {
                itc_reconfigure(env->itu);
            }
            break;
        }
    }
}

void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = env->CP0_SAARI & 0x3f;
    if (target < 2) {
        env->CP0_SAAR[target] =
            (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) |
            (env->CP0_SAAR[target] & 0x00000000ffffffffULL);
        switch (target) {
        case 0:
            if (env->itu) {
                itc_reconfigure(env->itu);
            }
            break;
        }
    }
}

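/*
 * EntryHi writes: mask the VPN2 and ASID fields, honour EHINV when TLB
 * invalidation is supported, drop reserved R-field values on Release 6,
 * keep TCStatus in sync when the MT ASE is available, and flush QEMU's
 * TLB if the ASID changes.
 */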
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS_R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (ase_mt_available(env)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & env->CP0_EntryHi_ASID_mask) !=
        (val & env->CP0_EntryHi_ASID_mask)) {
        tlb_flush(env_cpu(env));
    }
}

void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}

void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}

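/*
 * Status writes go through cpu_mips_store_status(); the transition is
 * logged, together with the resulting MMU mode, when CPU_LOG_EXEC
 * logging is enabled.
 */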
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t val, old;

    old = env->CP0_Status;
    cpu_mips_store_status(env, arg1);
    val = env->CP0_Status;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (cpu_mmu_index(env, false)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM:
            qemu_log(", UM\n");
            break;
        case MIPS_HFLAG_SM:
            qemu_log(", SM\n");
            break;
        case MIPS_HFLAG_KM:
            qemu_log("\n");
            break;
        default:
            cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
            break;
        }
    }
}

void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
    sync_c0_status(env, other, other_tc);
}

void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}

void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_cause(env, arg1);
}

void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    cpu_mips_store_cause(other, arg1);
}

target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}

target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}

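/*
 * EBase writes: bits 29:12 are always writable; when the WG (write gate)
 * bit is implemented and set in the written value, the upper address bits
 * become writable as well.
 */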
void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
}

void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
}

target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved.  */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}

void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ASE_MICROMIPS) {
        env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
                           (arg1 & (1 << CP0C3_ISA_ON_EXC));
    }
}

void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
                       (arg1 & env->CP0_Config4_rw_bitmask);
}

void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
                       (arg1 & env->CP0_Config5_rw_bitmask);
    env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
            0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
    compute_hflags(env);
}

void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
}

#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)

void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
}

void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] =
        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
}

void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
{
    int index = arg1 & 0x3f;
    if (index == 0x3f) {
        /*
         * Software may write all ones to INDEX to determine the
         * maximum value supported.
         */
        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
    } else if (index < MIPS_MAAR_MAX) {
        env->CP0_MAARI = index;
    }
    /*
     * Other than the all ones, if the value written is not supported,
     * then INDEX is unchanged from its previous value.
     */
}

void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /*
     * Watch exceptions for instructions, data loads, data stores
     * not implemented.
     */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

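/*
 * WatchHi writes: the M bit is read-only, the MMID field is only writable
 * when Config5.MI is set, and writing 1 to any of the low three (I/R/W)
 * status bits clears that bit (write-one-to-clear).
 */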
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
    uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */
    if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
        mask |= 0xFFFFFFFF00000000ULL; /* MMID */
    }
    env->CP0_WatchHi[sel] = m_bit | (arg1 & mask);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
                            (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
}

void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}

void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM)) {
        env->hflags |= MIPS_HFLAG_DM;
    } else {
        env->hflags &= ~MIPS_HFLAG_DM;
    }
}

void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_Debug_tcstatus = val;
    } else {
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    }
    other->CP0_Debug = (other->CP0_Debug &
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

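/*
 * ErrCtl: only the WST, SPR and ITC bits are writable (ITC only when an
 * ITC tag is configured).  When ITC is set alone, the CACHE instruction
 * is redirected to the ITC storage via the ITC_CACHE hflag.
 */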
void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
{
    int32_t wst = arg1 & (1 << CP0EC_WST);
    int32_t spr = arg1 & (1 << CP0EC_SPR);
    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;

    env->CP0_ErrCtl = wst | spr | itc;

    if (itc && !wst && !spr) {
        env->hflags |= MIPS_HFLAG_ITC_CACHE;
    } else {
        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
    }
}

void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
        /*
         * If CACHE instruction is configured for ITC tags then make all
         * CP0.TagLo bits writable. The actual write to ITC Configuration
         * Tag will take care of the read-only bits.
         */
        env->CP0_TagLo = arg1;
    } else {
        env->CP0_TagLo = arg1 & 0xFFFFFCF6;
    }
}

void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}

/* MIPS MT functions */
target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.gpr[sel];
    } else {
        return other->tcs[other_tc].gpr[sel];
    }
}

target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.LO[sel];
    } else {
        return other->tcs[other_tc].LO[sel];
    }
}

target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.HI[sel];
    } else {
        return other->tcs[other_tc].HI[sel];
    }
}

target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.ACX[sel];
    } else {
        return other->tcs[other_tc].ACX[sel];
    }
}

target_ulong helper_mftdsp(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.DSPControl;
    } else {
        return other->tcs[other_tc].DSPControl;
    }
}

void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.gpr[sel] = arg1;
    } else {
        other->tcs[other_tc].gpr[sel] = arg1;
    }
}

void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.LO[sel] = arg1;
    } else {
        other->tcs[other_tc].LO[sel] = arg1;
    }
}

void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.HI[sel] = arg1;
    } else {
        other->tcs[other_tc].HI[sel] = arg1;
    }
}

void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.ACX[sel] = arg1;
    } else {
        other->tcs[other_tc].ACX[sel] = arg1;
    }
}

void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.DSPControl = arg1;
    } else {
        other->tcs[other_tc].DSPControl = arg1;
    }
}

/* MIPS MT functions */
target_ulong helper_dmt(void)
{
    /* TODO */
    return 0;
}

target_ulong helper_emt(void)
{
    /* TODO */
    return 0;
}

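/*
 * DVPE/EVPE disable/enable multi-VPE execution: all VPEs other than the
 * one executing the instruction are put to sleep or woken up, and the
 * previous MVPControl value is returned.
 */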
target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        /* Turn off all VPEs except the one executing the dvpe.  */
        if (&other_cpu->env != env) {
            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
    }
    return prev;
}

target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if (&other_cpu->env != env
            /* If the VPE is WFI, don't disturb its sleep.  */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE.  */
            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up.  */
        }
    }
    return prev;
}

/* R6 Multi-threading */
target_ulong helper_dvp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            /* Turn off all VPs except the one executing the dvp. */
            if (&other_cpu->env != env) {
                mips_vpe_sleep(other_cpu);
            }
        }
        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
    }
    return prev;
}

target_ulong helper_evp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
                /*
                 * If the VP is WFI, don't disturb its sleep.
                 * Otherwise, wake it up.
                 */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    return prev;
}