/*
 * Helpers for emulation of CP0-related MIPS instructions.
 *
 * Copyright (C) 2004-2005 Jocelyn Mayer
 * Copyright (C) 2020 Wave Computing, Inc.
 * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "hw/misc/mips_itu.h"


/* SMP helpers. */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    /*
     * If the VPE is halted but otherwise active, it means it's waiting for
     * an interrupt.
     */
    return cpu->halted && mips_vpe_active(env);
}

static bool mips_vp_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    return cpu->halted && mips_vp_active(env);
}

static inline void mips_vpe_wake(MIPSCPU *c)
{
    /*
     * Don't set ->halted = 0 directly, let it be done via cpu_has_work
     * because there might be other conditions that state that c should
     * be sleeping.
     */
    bql_lock();
    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
    bql_unlock();
}

static inline void mips_vpe_sleep(MIPSCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /*
     * The VPE was shut off, really go to bed.
     * Reset any old _WAKE requests.
     */
    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
}

static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule. */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
        mips_vpe_wake(cpu);
    }
}

static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule. */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(cpu);
    }
}

/**
 * mips_cpu_map_tc:
 * @env: CPU from which mapping is performed.
 * @tc: Should point to an int with the value of the global TC index.
 *
 * This function will transform @tc into a local index within the
 * returned #CPUMIPSState.
 */

/*
 * FIXME: This code assumes that all VPEs have the same number of TCs,
 *        which depends on runtime setup. Can probably be fixed by
 *        walking the list of CPUMIPSStates.
 */
static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
{
    MIPSCPU *cpu;
    CPUState *cs;
    CPUState *other_cs;
    int vpe_idx;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs. */
        *tc = env->current_tc;
        return env;
    }

    cs = env_cpu(env);
    vpe_idx = tc_idx / cs->nr_threads;
    *tc = tc_idx % cs->nr_threads;
    other_cs = qemu_get_cpu(vpe_idx);
    if (other_cs == NULL) {
        return env;
    }
    cpu = MIPS_CPU(other_cs);
    return &cpu->env;
}

/*
 * The per VPE CP0_Status register shares some fields with the per TC
 * CP0_TCStatus registers. These fields are wired to the same registers,
 * so changes to either of them should be reflected on both registers.
 *
 * Also, EntryHi shares the bottom 8-bit ASID with TCStatus.
 *
 * These helper calls synchronize the regs for a given cpu.
 */

/*
 * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
 *                                   int tc);
 */

/* Called for updates to CP0_TCStatus. */
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
                             target_ulong v)
{
    uint32_t status;
    uint32_t tcu, tmx, tasid, tksu;
    uint32_t mask = ((1U << CP0St_CU3)
                     | (1 << CP0St_CU2)
                     | (1 << CP0St_CU1)
                     | (1 << CP0St_CU0)
                     | (1 << CP0St_MX)
                     | (3 << CP0St_KSU));

    tcu = (v >> CP0TCSt_TCU0) & 0xf;
    tmx = (v >> CP0TCSt_TMX) & 0x1;
    tasid = v & cpu->CP0_EntryHi_ASID_mask;
    tksu = (v >> CP0TCSt_TKSU) & 0x3;

    status = tcu << CP0St_CU0;
    status |= tmx << CP0St_MX;
    status |= tksu << CP0St_KSU;

    cpu->CP0_Status &= ~mask;
    cpu->CP0_Status |= status;

    /* Sync the TASID with EntryHi. */
    cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
    cpu->CP0_EntryHi |= tasid;

    compute_hflags(cpu);
}

/* Called for updates to CP0_EntryHi. */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid, v = cpu->CP0_EntryHi;

    asid = v & cpu->CP0_EntryHi_ASID_mask;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
    *tcst |= asid;
}

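/*
 * Return a pseudo-random index into the non-wired part of the TLB,
 * i.e. a value in [CP0_Wired, nb_tlb), avoiding returning the same
 * index twice in a row when more than one candidate entry exists.
 */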
/* XXX: do not use a global */
uint32_t cpu_mips_get_random(CPUMIPSState *env)
{
    static uint32_t seed = 1;
    static uint32_t prev_idx;
    uint32_t idx;
    uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;

    if (nb_rand_tlb == 1) {
        return env->tlb->nb_tlb - 1;
    }

    /* Don't return the same value twice, so get another value */
    do {
        /*
         * Use the simple linear congruential generator algorithm from
         * the ISO/IEC 9899 standard.
         */
        seed = 1103515245 * seed + 12345;
        idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
    } while (idx == prev_idx);
    prev_idx = idx;
    return idx;
}

/* CP0 helpers */
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf1;
}

target_ulong helper_mfc0_random(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_random(env);
}

target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCStatus;
}

target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCStatus;
    } else {
        return other->tcs[other_tc].CP0_TCStatus;
    }
}

target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCBind;
}

target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCBind;
    } else {
        return other->tcs[other_tc].CP0_TCBind;
    }
}

target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.PC;
    } else {
        return other->tcs[other_tc].PC;
    }
}

target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCHalt;
    } else {
        return other->tcs[other_tc].CP0_TCHalt;
    }
}

target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCContext;
    } else {
        return other->tcs[other_tc].CP0_TCContext;
    }
}

target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCSchedule;
    } else {
        return other->tcs[other_tc].CP0_TCSchedule;
    }
}

target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.CP0_TCScheFBack;
    } else {
        return other->tcs[other_tc].CP0_TCScheFBack;
    }
}

target_ulong helper_mfc0_count(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_count(env);
}

target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EntryHi;
}

target_ulong helper_mftc0_cause(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_Cause;
}

target_ulong helper_mftc0_status(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_Status;
}

target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
    return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
}

target_ulong helper_mfc0_maar(CPUMIPSState *env)
{
    return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
}

target_ulong helper_mfhc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI] >> 32;
}

target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t) env->CP0_WatchHi[sel];
}

target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel] >> 32;
}

target_ulong helper_mfc0_debug(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM) {
        t0 |= 1 << CP0DB_DM;
    }

    return t0;
}

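/*
 * Combine the VPE-wide Debug register with the per-TC SSt and Halt bits
 * of the TC selected by VPEControl.TargTC.
 */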
target_ulong helper_mftc0_debug(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    } else {
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
    }

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
           (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

#if defined(TARGET_MIPS64)
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
    return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI];
}

target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}

target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

#endif /* TARGET_MIPS64 */

void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t index_p = env->CP0_Index & 0x80000000;
    uint32_t tlb_index = arg1 & 0x7fffffff;
    if (tlb_index < env->tlb->nb_tlb) {
        if (env->insn_flags & ISA_MIPS_R6) {
            index_p |= arg1 & 0x80000000;
        }
        env->CP0_Index = index_p | tlb_index;
    }
}

void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    }
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0MVPCo_STLB);
    }
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable shared TLB, enable/disable VPEs. */

    env->mvp->CP0_MVPControl = newval;
}

void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /*
     * Yield scheduler intercept not implemented.
     * Gating storage scheduler intercept not implemented.
     */

    /* TODO: Enable/disable TCs. */

    env->CP0_VPEControl = newval;
}

void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable TCs. */

    other->CP0_VPEControl = newval;
}

target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    /* FIXME: Mask away return zero on read bits. */
    return other->CP0_VPEControl;
}

target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_VPEConf0;
}

void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
            mask |= (0xff << CP0VPEC0_XTC);
        }
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL. */

    env->CP0_VPEConf0 = newval;
}

void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask = 0;
    uint32_t newval;

    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL. */
    other->CP0_VPEConf0 = newval;
}

void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    }
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    /* TODO: Handle FPU (CP1) binding. */

    env->CP0_VPEConf1 = newval;
}

void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)

void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)

void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif

void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}

void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCStatus = arg1;
    } else {
        other->tcs[other_tc].CP0_TCStatus = arg1;
    }
    sync_c0_tcstatus(other, other_tc, arg1);
}

void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}

void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0;
    env->lladdr = 0;
    /* MIPS16 not implemented. */
}

void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->CP0_LLAddr = 0;
        other->lladdr = 0;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->CP0_LLAddr = 0;
        other->lladdr = 0;
        /* MIPS16 not implemented. */
    }
}

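/*
 * Bit 0 of TCHalt halts the current TC when set and restarts it when
 * cleared; the VPE is put to sleep or woken accordingly via
 * mips_tc_sleep() / mips_tc_wake().
 */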
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = env_archcpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    /* TODO: Halt TC / Restart (if allocated+active) TC. */
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}

void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    MIPSCPU *other_cpu = env_archcpu(other);

    /* TODO: Halt TC / Restart (if allocated+active) TC. */

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCHalt = arg1;
    } else {
        other->tcs[other_tc].CP0_TCHalt = arg1;
    }

    if (arg1 & 1) {
        mips_tc_sleep(other_cpu, other_tc);
    } else {
        mips_tc_wake(other_cpu, other_tc);
    }
}

void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCContext = arg1;
    } else {
        other->tcs[other_tc].CP0_TCContext = arg1;
    }
}

void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCSchedule = arg1;
    } else {
        other->tcs[other_tc].CP0_TCSchedule = arg1;
    }
}

void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCScheFBack = arg1;
    } else {
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
    }
}

void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif

void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
{
    int32_t old;
    old = env->CP0_MemoryMapID;
    env->CP0_MemoryMapID = (int32_t) arg1;
    /* If the MemoryMapID changes, flush qemu's TLB. */
    if (old != env->CP0_MemoryMapID) {
        cpu_mips_tlb_flush(env);
    }
}

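/*
 * Accept a new PageMask only if it is a contiguous run of ones starting
 * at bit 0 and does not describe a page smaller than the target page
 * size; otherwise fall back to the default target page size.
 */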
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
{
    uint32_t mask;
    int maskbits;

    /* Don't care MASKX as we don't support 1KB page */
    mask = extract32((uint32_t)arg1, CP0PM_MASK, 16);
    maskbits = cto32(mask);

    /* Ensure no more set bit after first zero */
    if ((mask >> maskbits) != 0) {
        goto invalid;
    }
    /* We don't support VTLB entry smaller than target page */
    if ((maskbits + TARGET_PAGE_BITS_MIN) < TARGET_PAGE_BITS) {
        goto invalid;
    }
    env->CP0_PageMask = mask << CP0PM_MASK;

    return;

invalid:
    /* When invalid, set to default target page size. */
    mask = (~TARGET_PAGE_MASK >> TARGET_PAGE_BITS_MIN);
    env->CP0_PageMask = mask << CP0PM_MASK;
}

void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    update_pagemask(env, arg1, &env->CP0_PageMask);
}

void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
    compute_hflags(env);
    restore_pamask(env);
}

void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
    tlb_flush(cs);
}

void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
    tlb_flush(cs);
}

void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
    tlb_flush(cs);
}

void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    uint64_t mask = 0x3F3FFFFFFFULL;
    uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
    uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;

    if ((env->insn_flags & ISA_MIPS_R6)) {
        if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_BDI);
        }
        if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_GDI);
        }
        if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_UDI);
        }
        if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_MDI);
        }
        if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_PTI);
        }
    }
    env->CP0_PWField = arg1 & mask;

    if ((new_ptei >= 32) ||
        ((env->insn_flags & ISA_MIPS_R6) &&
         (new_ptei == 0 || new_ptei == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
                           (old_ptei << CP0PF_PTEI);
    }
#else
    uint32_t mask = 0x3FFFFFFF;
    uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;

    if ((env->insn_flags & ISA_MIPS_R6)) {
        if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_GDW);
        }
        if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_UDW);
        }
        if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_MDW);
        }
        if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_PTW);
        }
    }
    env->CP0_PWField = arg1 & mask;

    if ((new_ptew >= 32) ||
        ((env->insn_flags & ISA_MIPS_R6) &&
         (new_ptew == 0 || new_ptew == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
                           (old_ptew << CP0PF_PTEW);
    }
#endif
}

void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
#else
    env->CP0_PWSize = arg1 & 0x3FFFFFFF;
#endif
}

void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ISA_MIPS_R6) {
        if (arg1 < env->tlb->nb_tlb) {
            env->CP0_Wired = arg1;
        }
    } else {
        env->CP0_Wired = arg1 % env->tlb->nb_tlb;
    }
}

void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    /* PWEn = 0. Hardware page table walking is not implemented. */
    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
#else
    env->CP0_PWCtl = (arg1 & 0x800000FF);
#endif
}

void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0x0000000F;

    if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
        (env->insn_flags & ISA_MIPS_R6)) {
        mask |= (1 << 4);
    }
    if (env->insn_flags & ISA_MIPS_R6) {
        mask |= (1 << 5);
    }
    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
        mask |= (1 << 29);

        if (arg1 & (1 << 29)) {
            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
        } else {
            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
        }
    }

    env->CP0_HWREna = arg1 & mask;
}

void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}

void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS_R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (ase_mt_available(env)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & env->CP0_EntryHi_ASID_mask) !=
        (val & env->CP0_EntryHi_ASID_mask)) {
        tlb_flush(env_cpu(env));
    }
}

void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}

void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}

void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t val, old;

    old = env->CP0_Status;
    cpu_mips_store_status(env, arg1);
    val = env->CP0_Status;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                 old, old & env->CP0_Cause & CP0Ca_IP_mask,
                 val, val & env->CP0_Cause & CP0Ca_IP_mask,
                 env->CP0_Cause);
        switch (mips_env_mmu_index(env)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM:
            qemu_log(", UM\n");
            break;
        case MIPS_HFLAG_SM:
            qemu_log(", SM\n");
            break;
        case MIPS_HFLAG_KM:
            qemu_log("\n");
            break;
        default:
            cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
            break;
        }
    }
}

void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
    sync_c0_status(env, other, other_tc);
}

void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}

void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_cause(env, arg1);
}

void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    cpu_mips_store_cause(other, arg1);
}

target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}

target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}

void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
}

void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
}

target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved. */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}

void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ASE_MICROMIPS) {
        env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
                           (arg1 & (1 << CP0C3_ISA_ON_EXC));
    }
}

void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
                       (arg1 & env->CP0_Config4_rw_bitmask);
}

void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
                       (arg1 & env->CP0_Config5_rw_bitmask);
    env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
            0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
    compute_hflags(env);
}

void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
}

#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)

void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
}

void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] =
        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
}

void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
{
    int index = arg1 & 0x3f;
    if (index == 0x3f) {
        /*
         * Software may write all ones to INDEX to determine the
         * maximum value supported.
         */
        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
    } else if (index < MIPS_MAAR_MAX) {
        env->CP0_MAARI = index;
    }
    /*
     * Other than the all ones, if the value written is not supported,
     * then INDEX is unchanged from its previous value.
     */
}

void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /*
     * Watch exceptions for instructions, data loads, data stores
     * not implemented.
     */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

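/*
 * The M bit is read-only and preserved across writes; bits 2:0 (the I,
 * R and W status bits) are write-one-to-clear; the upper 32 bits (MMID)
 * are only writable when Config5.MI is set.
 */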
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
    uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */
    if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
        mask |= 0xFFFFFFFF00000000ULL; /* MMID */
    }
    env->CP0_WatchHi[sel] = m_bit | (arg1 & mask);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
                            (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
}

void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}

void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM)) {
        env->hflags |= MIPS_HFLAG_DM;
    } else {
        env->hflags &= ~MIPS_HFLAG_DM;
    }
}

void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_Debug_tcstatus = val;
    } else {
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    }
    other->CP0_Debug = (other->CP0_Debug &
                        ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                       (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
{
    int32_t wst = arg1 & (1 << CP0EC_WST);
    int32_t spr = arg1 & (1 << CP0EC_SPR);
    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;

    env->CP0_ErrCtl = wst | spr | itc;

    if (itc && !wst && !spr) {
        env->hflags |= MIPS_HFLAG_ITC_CACHE;
    } else {
        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
    }
}

void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
        /*
         * If CACHE instruction is configured for ITC tags then make all
         * CP0.TagLo bits writable. The actual write to ITC Configuration
         * Tag will take care of the read-only bits.
         */
        env->CP0_TagLo = arg1;
    } else {
        env->CP0_TagLo = arg1 & 0xFFFFFCF6;
    }
}

void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}

/* MIPS MT functions */
target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.gpr[sel];
    } else {
        return other->tcs[other_tc].gpr[sel];
    }
}

target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.LO[sel];
    } else {
        return other->tcs[other_tc].LO[sel];
    }
}

target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.HI[sel];
    } else {
        return other->tcs[other_tc].HI[sel];
    }
}

target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.ACX[sel];
    } else {
        return other->tcs[other_tc].ACX[sel];
    }
}

target_ulong helper_mftdsp(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.DSPControl;
    } else {
        return other->tcs[other_tc].DSPControl;
    }
}

void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.gpr[sel] = arg1;
    } else {
        other->tcs[other_tc].gpr[sel] = arg1;
    }
}

void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.LO[sel] = arg1;
    } else {
        other->tcs[other_tc].LO[sel] = arg1;
    }
}

void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.HI[sel] = arg1;
    } else {
        other->tcs[other_tc].HI[sel] = arg1;
    }
}

void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.ACX[sel] = arg1;
    } else {
        other->tcs[other_tc].ACX[sel] = arg1;
    }
}

void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.DSPControl = arg1;
    } else {
        other->tcs[other_tc].DSPControl = arg1;
    }
}

/* MIPS MT functions */
target_ulong helper_dmt(void)
{
    /* TODO */
    return 0;
}

target_ulong helper_emt(void)
{
    /* TODO */
    return 0;
}

target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        /* Turn off all VPEs except the one executing the dvpe. */
        if (&other_cpu->env != env) {
            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
    }
    return prev;
}

target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if (&other_cpu->env != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up. */
        }
    }
    return prev;
}

/* R6 Multi-threading */
target_ulong helper_dvp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            /* Turn off all VPs except the one executing the dvp. */
            if (&other_cpu->env != env) {
                mips_vpe_sleep(other_cpu);
            }
        }
        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
    }
    return prev;
}

target_ulong helper_evp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
                /*
                 * If the VP is WFI, don't disturb its sleep.
                 * Otherwise, wake it up.
                 */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    return prev;
}