1 /* 2 * Helpers for emulation of CP0-related MIPS instructions. 3 * 4 * Copyright (C) 2004-2005 Jocelyn Mayer 5 * Copyright (C) 2020 Wave Computing, Inc. 6 * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com> 7 * 8 * This library is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU Lesser General Public 10 * License as published by the Free Software Foundation; either 11 * version 2.1 of the License, or (at your option) any later version. 12 * 13 * This library is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * Lesser General Public License for more details. 17 * 18 * You should have received a copy of the GNU Lesser General Public 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 20 * 21 */ 22 23 #include "qemu/osdep.h" 24 #include "qemu/log.h" 25 #include "qemu/main-loop.h" 26 #include "cpu.h" 27 #include "internal.h" 28 #include "qemu/host-utils.h" 29 #include "exec/helper-proto.h" 30 #include "exec/exec-all.h" 31 32 33 /* SMP helpers. */ 34 static bool mips_vpe_is_wfi(MIPSCPU *c) 35 { 36 CPUState *cpu = CPU(c); 37 CPUMIPSState *env = &c->env; 38 39 /* 40 * If the VPE is halted but otherwise active, it means it's waiting for 41 * an interrupt.\ 42 */ 43 return cpu->halted && mips_vpe_active(env); 44 } 45 46 static bool mips_vp_is_wfi(MIPSCPU *c) 47 { 48 CPUState *cpu = CPU(c); 49 CPUMIPSState *env = &c->env; 50 51 return cpu->halted && mips_vp_active(env); 52 } 53 54 static inline void mips_vpe_wake(MIPSCPU *c) 55 { 56 /* 57 * Don't set ->halted = 0 directly, let it be done via cpu_has_work 58 * because there might be other conditions that state that c should 59 * be sleeping. 
60 */ 61 bql_lock(); 62 cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE); 63 bql_unlock(); 64 } 65 66 static inline void mips_vpe_sleep(MIPSCPU *cpu) 67 { 68 CPUState *cs = CPU(cpu); 69 70 /* 71 * The VPE was shut off, really go to bed. 72 * Reset any old _WAKE requests. 73 */ 74 cs->halted = 1; 75 cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); 76 } 77 78 static inline void mips_tc_wake(MIPSCPU *cpu, int tc) 79 { 80 CPUMIPSState *c = &cpu->env; 81 82 /* FIXME: TC reschedule. */ 83 if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) { 84 mips_vpe_wake(cpu); 85 } 86 } 87 88 static inline void mips_tc_sleep(MIPSCPU *cpu, int tc) 89 { 90 CPUMIPSState *c = &cpu->env; 91 92 /* FIXME: TC reschedule. */ 93 if (!mips_vpe_active(c)) { 94 mips_vpe_sleep(cpu); 95 } 96 } 97 98 /** 99 * mips_cpu_map_tc: 100 * @env: CPU from which mapping is performed. 101 * @tc: Should point to an int with the value of the global TC index. 102 * 103 * This function will transform @tc into a local index within the 104 * returned #CPUMIPSState. 105 */ 106 107 /* 108 * FIXME: This code assumes that all VPEs have the same number of TCs, 109 * which depends on runtime setup. Can probably be fixed by 110 * walking the list of CPUMIPSStates. 111 */ 112 static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc) 113 { 114 MIPSCPU *cpu; 115 CPUState *cs; 116 CPUState *other_cs; 117 int vpe_idx; 118 int tc_idx = *tc; 119 120 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) { 121 /* Not allowed to address other CPUs. */ 122 *tc = env->current_tc; 123 return env; 124 } 125 126 cs = env_cpu(env); 127 vpe_idx = tc_idx / cs->nr_threads; 128 *tc = tc_idx % cs->nr_threads; 129 other_cs = qemu_get_cpu(vpe_idx); 130 if (other_cs == NULL) { 131 return env; 132 } 133 cpu = MIPS_CPU(other_cs); 134 return &cpu->env; 135 } 136 137 /* 138 * The per VPE CP0_Status register shares some fields with the per TC 139 * CP0_TCStatus registers. 
These fields are wired to the same registers, 140 * so changes to either of them should be reflected on both registers. 141 * 142 * Also, EntryHi shares the bottom 8 bit ASID with TCStauts. 143 * 144 * These helper call synchronizes the regs for a given cpu. 145 */ 146 147 /* 148 * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c. 149 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, 150 * int tc); 151 */ 152 153 /* Called for updates to CP0_TCStatus. */ 154 static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, 155 target_ulong v) 156 { 157 uint32_t status; 158 uint32_t tcu, tmx, tasid, tksu; 159 uint32_t mask = ((1U << CP0St_CU3) 160 | (1 << CP0St_CU2) 161 | (1 << CP0St_CU1) 162 | (1 << CP0St_CU0) 163 | (1 << CP0St_MX) 164 | (3 << CP0St_KSU)); 165 166 tcu = (v >> CP0TCSt_TCU0) & 0xf; 167 tmx = (v >> CP0TCSt_TMX) & 0x1; 168 tasid = v & cpu->CP0_EntryHi_ASID_mask; 169 tksu = (v >> CP0TCSt_TKSU) & 0x3; 170 171 status = tcu << CP0St_CU0; 172 status |= tmx << CP0St_MX; 173 status |= tksu << CP0St_KSU; 174 175 cpu->CP0_Status &= ~mask; 176 cpu->CP0_Status |= status; 177 178 /* Sync the TASID with EntryHi. */ 179 cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask; 180 cpu->CP0_EntryHi |= tasid; 181 182 compute_hflags(cpu); 183 } 184 185 /* Called for updates to CP0_EntryHi. 
*/ 186 static void sync_c0_entryhi(CPUMIPSState *cpu, int tc) 187 { 188 int32_t *tcst; 189 uint32_t asid, v = cpu->CP0_EntryHi; 190 191 asid = v & cpu->CP0_EntryHi_ASID_mask; 192 193 if (tc == cpu->current_tc) { 194 tcst = &cpu->active_tc.CP0_TCStatus; 195 } else { 196 tcst = &cpu->tcs[tc].CP0_TCStatus; 197 } 198 199 *tcst &= ~cpu->CP0_EntryHi_ASID_mask; 200 *tcst |= asid; 201 } 202 203 /* XXX: do not use a global */ 204 uint32_t cpu_mips_get_random(CPUMIPSState *env) 205 { 206 static uint32_t seed = 1; 207 static uint32_t prev_idx; 208 uint32_t idx; 209 uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired; 210 211 if (nb_rand_tlb == 1) { 212 return env->tlb->nb_tlb - 1; 213 } 214 215 /* Don't return same value twice, so get another value */ 216 do { 217 /* 218 * Use a simple algorithm of Linear Congruential Generator 219 * from ISO/IEC 9899 standard. 220 */ 221 seed = 1103515245 * seed + 12345; 222 idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired; 223 } while (idx == prev_idx); 224 prev_idx = idx; 225 return idx; 226 } 227 228 /* CP0 helpers */ 229 target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env) 230 { 231 return env->mvp->CP0_MVPControl; 232 } 233 234 target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env) 235 { 236 return env->mvp->CP0_MVPConf0; 237 } 238 239 target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env) 240 { 241 return env->mvp->CP0_MVPConf1; 242 } 243 244 target_ulong helper_mfc0_random(CPUMIPSState *env) 245 { 246 return (int32_t)cpu_mips_get_random(env); 247 } 248 249 target_ulong helper_mfc0_tcstatus(CPUMIPSState *env) 250 { 251 return env->active_tc.CP0_TCStatus; 252 } 253 254 target_ulong helper_mftc0_tcstatus(CPUMIPSState *env) 255 { 256 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 257 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 258 259 if (other_tc == other->current_tc) { 260 return other->active_tc.CP0_TCStatus; 261 } else { 262 return other->tcs[other_tc].CP0_TCStatus; 263 } 264 } 265 266 target_ulong 
helper_mfc0_tcbind(CPUMIPSState *env) 267 { 268 return env->active_tc.CP0_TCBind; 269 } 270 271 target_ulong helper_mftc0_tcbind(CPUMIPSState *env) 272 { 273 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 274 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 275 276 if (other_tc == other->current_tc) { 277 return other->active_tc.CP0_TCBind; 278 } else { 279 return other->tcs[other_tc].CP0_TCBind; 280 } 281 } 282 283 target_ulong helper_mfc0_tcrestart(CPUMIPSState *env) 284 { 285 return env->active_tc.PC; 286 } 287 288 target_ulong helper_mftc0_tcrestart(CPUMIPSState *env) 289 { 290 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 291 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 292 293 if (other_tc == other->current_tc) { 294 return other->active_tc.PC; 295 } else { 296 return other->tcs[other_tc].PC; 297 } 298 } 299 300 target_ulong helper_mfc0_tchalt(CPUMIPSState *env) 301 { 302 return env->active_tc.CP0_TCHalt; 303 } 304 305 target_ulong helper_mftc0_tchalt(CPUMIPSState *env) 306 { 307 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 308 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 309 310 if (other_tc == other->current_tc) { 311 return other->active_tc.CP0_TCHalt; 312 } else { 313 return other->tcs[other_tc].CP0_TCHalt; 314 } 315 } 316 317 target_ulong helper_mfc0_tccontext(CPUMIPSState *env) 318 { 319 return env->active_tc.CP0_TCContext; 320 } 321 322 target_ulong helper_mftc0_tccontext(CPUMIPSState *env) 323 { 324 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 325 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 326 327 if (other_tc == other->current_tc) { 328 return other->active_tc.CP0_TCContext; 329 } else { 330 return other->tcs[other_tc].CP0_TCContext; 331 } 332 } 333 334 target_ulong helper_mfc0_tcschedule(CPUMIPSState *env) 335 { 336 return env->active_tc.CP0_TCSchedule; 337 } 338 339 target_ulong helper_mftc0_tcschedule(CPUMIPSState *env) 340 { 341 int 
other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 342 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 343 344 if (other_tc == other->current_tc) { 345 return other->active_tc.CP0_TCSchedule; 346 } else { 347 return other->tcs[other_tc].CP0_TCSchedule; 348 } 349 } 350 351 target_ulong helper_mfc0_tcschefback(CPUMIPSState *env) 352 { 353 return env->active_tc.CP0_TCScheFBack; 354 } 355 356 target_ulong helper_mftc0_tcschefback(CPUMIPSState *env) 357 { 358 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 359 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 360 361 if (other_tc == other->current_tc) { 362 return other->active_tc.CP0_TCScheFBack; 363 } else { 364 return other->tcs[other_tc].CP0_TCScheFBack; 365 } 366 } 367 368 target_ulong helper_mfc0_count(CPUMIPSState *env) 369 { 370 return (int32_t)cpu_mips_get_count(env); 371 } 372 373 target_ulong helper_mftc0_entryhi(CPUMIPSState *env) 374 { 375 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 376 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 377 378 return other->CP0_EntryHi; 379 } 380 381 target_ulong helper_mftc0_cause(CPUMIPSState *env) 382 { 383 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 384 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 385 386 return other->CP0_Cause; 387 } 388 389 target_ulong helper_mftc0_status(CPUMIPSState *env) 390 { 391 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 392 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 393 394 return other->CP0_Status; 395 } 396 397 target_ulong helper_mfc0_lladdr(CPUMIPSState *env) 398 { 399 return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift); 400 } 401 402 target_ulong helper_mfc0_maar(CPUMIPSState *env) 403 { 404 return (int32_t) env->CP0_MAAR[env->CP0_MAARI]; 405 } 406 407 target_ulong helper_mfhc0_maar(CPUMIPSState *env) 408 { 409 return env->CP0_MAAR[env->CP0_MAARI] >> 32; 410 } 411 412 target_ulong 
helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel) 413 { 414 return (int32_t)env->CP0_WatchLo[sel]; 415 } 416 417 target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel) 418 { 419 return (int32_t) env->CP0_WatchHi[sel]; 420 } 421 422 target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel) 423 { 424 return env->CP0_WatchHi[sel] >> 32; 425 } 426 427 target_ulong helper_mfc0_debug(CPUMIPSState *env) 428 { 429 target_ulong t0 = env->CP0_Debug; 430 if (env->hflags & MIPS_HFLAG_DM) { 431 t0 |= 1 << CP0DB_DM; 432 } 433 434 return t0; 435 } 436 437 target_ulong helper_mftc0_debug(CPUMIPSState *env) 438 { 439 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 440 int32_t tcstatus; 441 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 442 443 if (other_tc == other->current_tc) { 444 tcstatus = other->active_tc.CP0_Debug_tcstatus; 445 } else { 446 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus; 447 } 448 449 /* XXX: Might be wrong, check with EJTAG spec. 
*/ 450 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | 451 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); 452 } 453 454 #if defined(TARGET_MIPS64) 455 target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env) 456 { 457 return env->active_tc.PC; 458 } 459 460 target_ulong helper_dmfc0_tchalt(CPUMIPSState *env) 461 { 462 return env->active_tc.CP0_TCHalt; 463 } 464 465 target_ulong helper_dmfc0_tccontext(CPUMIPSState *env) 466 { 467 return env->active_tc.CP0_TCContext; 468 } 469 470 target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env) 471 { 472 return env->active_tc.CP0_TCSchedule; 473 } 474 475 target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env) 476 { 477 return env->active_tc.CP0_TCScheFBack; 478 } 479 480 target_ulong helper_dmfc0_lladdr(CPUMIPSState *env) 481 { 482 return env->CP0_LLAddr >> env->CP0_LLAddr_shift; 483 } 484 485 target_ulong helper_dmfc0_maar(CPUMIPSState *env) 486 { 487 return env->CP0_MAAR[env->CP0_MAARI]; 488 } 489 490 target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel) 491 { 492 return env->CP0_WatchLo[sel]; 493 } 494 495 target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel) 496 { 497 return env->CP0_WatchHi[sel]; 498 } 499 500 #endif /* TARGET_MIPS64 */ 501 502 void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1) 503 { 504 uint32_t index_p = env->CP0_Index & 0x80000000; 505 uint32_t tlb_index = arg1 & 0x7fffffff; 506 if (tlb_index < env->tlb->nb_tlb) { 507 if (env->insn_flags & ISA_MIPS_R6) { 508 index_p |= arg1 & 0x80000000; 509 } 510 env->CP0_Index = index_p | tlb_index; 511 } 512 } 513 514 void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1) 515 { 516 uint32_t mask = 0; 517 uint32_t newval; 518 519 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { 520 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) | 521 (1 << CP0MVPCo_EVP); 522 } 523 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { 524 mask |= (1 << CP0MVPCo_STLB); 525 } 526 newval = 
(env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask); 527 528 /* TODO: Enable/disable shared TLB, enable/disable VPEs. */ 529 530 env->mvp->CP0_MVPControl = newval; 531 } 532 533 void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) 534 { 535 uint32_t mask; 536 uint32_t newval; 537 538 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | 539 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); 540 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask); 541 542 /* 543 * Yield scheduler intercept not implemented. 544 * Gating storage scheduler intercept not implemented. 545 */ 546 547 /* TODO: Enable/disable TCs. */ 548 549 env->CP0_VPEControl = newval; 550 } 551 552 void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) 553 { 554 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 555 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 556 uint32_t mask; 557 uint32_t newval; 558 559 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | 560 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); 561 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask); 562 563 /* TODO: Enable/disable TCs. */ 564 565 other->CP0_VPEControl = newval; 566 } 567 568 target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env) 569 { 570 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 571 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 572 /* FIXME: Mask away return zero on read bits. 
*/ 573 return other->CP0_VPEControl; 574 } 575 576 target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env) 577 { 578 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 579 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 580 581 return other->CP0_VPEConf0; 582 } 583 584 void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) 585 { 586 uint32_t mask = 0; 587 uint32_t newval; 588 589 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { 590 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) { 591 mask |= (0xff << CP0VPEC0_XTC); 592 } 593 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); 594 } 595 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask); 596 597 /* TODO: TC exclusive handling due to ERL/EXL. */ 598 599 env->CP0_VPEConf0 = newval; 600 } 601 602 void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) 603 { 604 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 605 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 606 uint32_t mask = 0; 607 uint32_t newval; 608 609 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); 610 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask); 611 612 /* TODO: TC exclusive handling due to ERL/EXL. */ 613 other->CP0_VPEConf0 = newval; 614 } 615 616 void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1) 617 { 618 uint32_t mask = 0; 619 uint32_t newval; 620 621 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) 622 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) | 623 (0xff << CP0VPEC1_NCP1); 624 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask); 625 626 /* UDI not implemented. */ 627 /* CP2 not implemented. */ 628 629 /* TODO: Handle FPU (CP1) binding. */ 630 631 env->CP0_VPEConf1 = newval; 632 } 633 634 void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1) 635 { 636 /* Yield qualifier inputs not implemented. 
*/ 637 env->CP0_YQMask = 0x00000000; 638 } 639 640 void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1) 641 { 642 env->CP0_VPEOpt = arg1 & 0x0000ffff; 643 } 644 645 #define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF) 646 647 void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1) 648 { 649 /* 1k pages not implemented */ 650 target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); 651 env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env)) 652 | (rxi << (CP0EnLo_XI - 30)); 653 } 654 655 #if defined(TARGET_MIPS64) 656 #define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6) 657 658 void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1) 659 { 660 uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); 661 env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi; 662 } 663 #endif 664 665 void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1) 666 { 667 uint32_t mask = env->CP0_TCStatus_rw_bitmask; 668 uint32_t newval; 669 670 newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask); 671 672 env->active_tc.CP0_TCStatus = newval; 673 sync_c0_tcstatus(env, env->current_tc, newval); 674 } 675 676 void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1) 677 { 678 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 679 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 680 681 if (other_tc == other->current_tc) { 682 other->active_tc.CP0_TCStatus = arg1; 683 } else { 684 other->tcs[other_tc].CP0_TCStatus = arg1; 685 } 686 sync_c0_tcstatus(other, other_tc, arg1); 687 } 688 689 void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1) 690 { 691 uint32_t mask = (1 << CP0TCBd_TBE); 692 uint32_t newval; 693 694 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { 695 mask |= (1 << CP0TCBd_CurVPE); 696 } 697 newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); 698 env->active_tc.CP0_TCBind = newval; 699 } 700 701 void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong 
arg1) 702 { 703 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 704 uint32_t mask = (1 << CP0TCBd_TBE); 705 uint32_t newval; 706 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 707 708 if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { 709 mask |= (1 << CP0TCBd_CurVPE); 710 } 711 if (other_tc == other->current_tc) { 712 newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); 713 other->active_tc.CP0_TCBind = newval; 714 } else { 715 newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask); 716 other->tcs[other_tc].CP0_TCBind = newval; 717 } 718 } 719 720 void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1) 721 { 722 env->active_tc.PC = arg1; 723 env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); 724 env->CP0_LLAddr = 0; 725 env->lladdr = 0; 726 /* MIPS16 not implemented. */ 727 } 728 729 void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1) 730 { 731 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 732 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 733 734 if (other_tc == other->current_tc) { 735 other->active_tc.PC = arg1; 736 other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); 737 other->CP0_LLAddr = 0; 738 other->lladdr = 0; 739 /* MIPS16 not implemented. */ 740 } else { 741 other->tcs[other_tc].PC = arg1; 742 other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS); 743 other->CP0_LLAddr = 0; 744 other->lladdr = 0; 745 /* MIPS16 not implemented. */ 746 } 747 } 748 749 void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1) 750 { 751 MIPSCPU *cpu = env_archcpu(env); 752 753 env->active_tc.CP0_TCHalt = arg1 & 0x1; 754 755 /* TODO: Halt TC / Restart (if allocated+active) TC. 
*/ 756 if (env->active_tc.CP0_TCHalt & 1) { 757 mips_tc_sleep(cpu, env->current_tc); 758 } else { 759 mips_tc_wake(cpu, env->current_tc); 760 } 761 } 762 763 void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1) 764 { 765 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 766 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 767 MIPSCPU *other_cpu = env_archcpu(other); 768 769 /* TODO: Halt TC / Restart (if allocated+active) TC. */ 770 771 if (other_tc == other->current_tc) { 772 other->active_tc.CP0_TCHalt = arg1; 773 } else { 774 other->tcs[other_tc].CP0_TCHalt = arg1; 775 } 776 777 if (arg1 & 1) { 778 mips_tc_sleep(other_cpu, other_tc); 779 } else { 780 mips_tc_wake(other_cpu, other_tc); 781 } 782 } 783 784 void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1) 785 { 786 env->active_tc.CP0_TCContext = arg1; 787 } 788 789 void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1) 790 { 791 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 792 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 793 794 if (other_tc == other->current_tc) { 795 other->active_tc.CP0_TCContext = arg1; 796 } else { 797 other->tcs[other_tc].CP0_TCContext = arg1; 798 } 799 } 800 801 void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1) 802 { 803 env->active_tc.CP0_TCSchedule = arg1; 804 } 805 806 void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1) 807 { 808 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 809 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 810 811 if (other_tc == other->current_tc) { 812 other->active_tc.CP0_TCSchedule = arg1; 813 } else { 814 other->tcs[other_tc].CP0_TCSchedule = arg1; 815 } 816 } 817 818 void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1) 819 { 820 env->active_tc.CP0_TCScheFBack = arg1; 821 } 822 823 void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1) 824 { 825 int other_tc = env->CP0_VPEControl & (0xff << 
CP0VPECo_TargTC); 826 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 827 828 if (other_tc == other->current_tc) { 829 other->active_tc.CP0_TCScheFBack = arg1; 830 } else { 831 other->tcs[other_tc].CP0_TCScheFBack = arg1; 832 } 833 } 834 835 void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1) 836 { 837 /* 1k pages not implemented */ 838 target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); 839 env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env)) 840 | (rxi << (CP0EnLo_XI - 30)); 841 } 842 843 #if defined(TARGET_MIPS64) 844 void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1) 845 { 846 uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); 847 env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi; 848 } 849 #endif 850 851 void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1) 852 { 853 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF); 854 } 855 856 void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1) 857 { 858 int32_t old; 859 old = env->CP0_MemoryMapID; 860 env->CP0_MemoryMapID = (int32_t) arg1; 861 /* If the MemoryMapID changes, flush qemu's TLB. */ 862 if (old != env->CP0_MemoryMapID) { 863 cpu_mips_tlb_flush(env); 864 } 865 } 866 867 void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask) 868 { 869 uint32_t mask; 870 int maskbits; 871 872 /* Don't care MASKX as we don't support 1KB page */ 873 mask = extract32((uint32_t)arg1, CP0PM_MASK, 16); 874 maskbits = cto32(mask); 875 876 /* Ensure no more set bit after first zero */ 877 if ((mask >> maskbits) != 0) { 878 goto invalid; 879 } 880 /* We don't support VTLB entry smaller than target page */ 881 if ((maskbits + TARGET_PAGE_BITS_MIN) < TARGET_PAGE_BITS) { 882 goto invalid; 883 } 884 env->CP0_PageMask = mask << CP0PM_MASK; 885 886 return; 887 888 invalid: 889 /* When invalid, set to default target page size. 
*/ 890 mask = (~TARGET_PAGE_MASK >> TARGET_PAGE_BITS_MIN); 891 env->CP0_PageMask = mask << CP0PM_MASK; 892 } 893 894 void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) 895 { 896 update_pagemask(env, arg1, &env->CP0_PageMask); 897 } 898 899 void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1) 900 { 901 /* SmartMIPS not implemented */ 902 /* 1k pages not implemented */ 903 env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) | 904 (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask); 905 compute_hflags(env); 906 restore_pamask(env); 907 } 908 909 void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1) 910 { 911 CPUState *cs = env_cpu(env); 912 913 env->CP0_SegCtl0 = arg1 & CP0SC0_MASK; 914 tlb_flush(cs); 915 } 916 917 void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1) 918 { 919 CPUState *cs = env_cpu(env); 920 921 env->CP0_SegCtl1 = arg1 & CP0SC1_MASK; 922 tlb_flush(cs); 923 } 924 925 void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1) 926 { 927 CPUState *cs = env_cpu(env); 928 929 env->CP0_SegCtl2 = arg1 & CP0SC2_MASK; 930 tlb_flush(cs); 931 } 932 933 void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1) 934 { 935 #if defined(TARGET_MIPS64) 936 uint64_t mask = 0x3F3FFFFFFFULL; 937 uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL; 938 uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL; 939 940 if ((env->insn_flags & ISA_MIPS_R6)) { 941 if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) { 942 mask &= ~(0x3FULL << CP0PF_BDI); 943 } 944 if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) { 945 mask &= ~(0x3FULL << CP0PF_GDI); 946 } 947 if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) { 948 mask &= ~(0x3FULL << CP0PF_UDI); 949 } 950 if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) { 951 mask &= ~(0x3FULL << CP0PF_MDI); 952 } 953 if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) { 954 mask &= ~(0x3FULL << CP0PF_PTI); 955 } 956 } 957 env->CP0_PWField = arg1 & mask; 958 959 if ((new_ptei >= 32) || 960 ((env->insn_flags & 
ISA_MIPS_R6) && 961 (new_ptei == 0 || new_ptei == 1))) { 962 env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) | 963 (old_ptei << CP0PF_PTEI); 964 } 965 #else 966 uint32_t mask = 0x3FFFFFFF; 967 uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; 968 uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F; 969 970 if ((env->insn_flags & ISA_MIPS_R6)) { 971 if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) { 972 mask &= ~(0x3F << CP0PF_GDW); 973 } 974 if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) { 975 mask &= ~(0x3F << CP0PF_UDW); 976 } 977 if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) { 978 mask &= ~(0x3F << CP0PF_MDW); 979 } 980 if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) { 981 mask &= ~(0x3F << CP0PF_PTW); 982 } 983 } 984 env->CP0_PWField = arg1 & mask; 985 986 if ((new_ptew >= 32) || 987 ((env->insn_flags & ISA_MIPS_R6) && 988 (new_ptew == 0 || new_ptew == 1))) { 989 env->CP0_PWField = (env->CP0_PWField & ~0x3F) | 990 (old_ptew << CP0PF_PTEW); 991 } 992 #endif 993 } 994 995 void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1) 996 { 997 #if defined(TARGET_MIPS64) 998 env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL; 999 #else 1000 env->CP0_PWSize = arg1 & 0x3FFFFFFF; 1001 #endif 1002 } 1003 1004 void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) 1005 { 1006 if (env->insn_flags & ISA_MIPS_R6) { 1007 if (arg1 < env->tlb->nb_tlb) { 1008 env->CP0_Wired = arg1; 1009 } 1010 } else { 1011 env->CP0_Wired = arg1 % env->tlb->nb_tlb; 1012 } 1013 } 1014 1015 void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1) 1016 { 1017 #if defined(TARGET_MIPS64) 1018 /* PWEn = 0. Hardware page table walking is not implemented. 
*/ 1019 env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F); 1020 #else 1021 env->CP0_PWCtl = (arg1 & 0x800000FF); 1022 #endif 1023 } 1024 1025 void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1) 1026 { 1027 env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask; 1028 } 1029 1030 void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1) 1031 { 1032 env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask; 1033 } 1034 1035 void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1) 1036 { 1037 env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask; 1038 } 1039 1040 void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1) 1041 { 1042 env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask; 1043 } 1044 1045 void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1) 1046 { 1047 env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask; 1048 } 1049 1050 void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1) 1051 { 1052 uint32_t mask = 0x0000000F; 1053 1054 if ((env->CP0_Config1 & (1 << CP0C1_PC)) && 1055 (env->insn_flags & ISA_MIPS_R6)) { 1056 mask |= (1 << 4); 1057 } 1058 if (env->insn_flags & ISA_MIPS_R6) { 1059 mask |= (1 << 5); 1060 } 1061 if (env->CP0_Config3 & (1 << CP0C3_ULRI)) { 1062 mask |= (1 << 29); 1063 1064 if (arg1 & (1 << 29)) { 1065 env->hflags |= MIPS_HFLAG_HWRENA_ULR; 1066 } else { 1067 env->hflags &= ~MIPS_HFLAG_HWRENA_ULR; 1068 } 1069 } 1070 1071 env->CP0_HWREna = arg1 & mask; 1072 } 1073 1074 void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1) 1075 { 1076 cpu_mips_store_count(env, arg1); 1077 } 1078 1079 void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1) 1080 { 1081 target_ulong old, val, mask; 1082 mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask; 1083 if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) { 1084 mask |= 1 << CP0EnHi_EHINV; 1085 } 1086 1087 /* 1k pages not implemented */ 1088 #if defined(TARGET_MIPS64) 1089 if (env->insn_flags & ISA_MIPS_R6) { 
1090 int entryhi_r = extract64(arg1, 62, 2); 1091 int config0_at = extract32(env->CP0_Config0, 13, 2); 1092 bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0; 1093 if ((entryhi_r == 2) || 1094 (entryhi_r == 1 && (no_supervisor || config0_at == 1))) { 1095 /* skip EntryHi.R field if new value is reserved */ 1096 mask &= ~(0x3ull << 62); 1097 } 1098 } 1099 mask &= env->SEGMask; 1100 #endif 1101 old = env->CP0_EntryHi; 1102 val = (arg1 & mask) | (old & ~mask); 1103 env->CP0_EntryHi = val; 1104 if (ase_mt_available(env)) { 1105 sync_c0_entryhi(env, env->current_tc); 1106 } 1107 /* If the ASID changes, flush qemu's TLB. */ 1108 if ((old & env->CP0_EntryHi_ASID_mask) != 1109 (val & env->CP0_EntryHi_ASID_mask)) { 1110 tlb_flush(env_cpu(env)); 1111 } 1112 } 1113 1114 void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1) 1115 { 1116 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1117 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1118 1119 other->CP0_EntryHi = arg1; 1120 sync_c0_entryhi(other, other_tc); 1121 } 1122 1123 void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1) 1124 { 1125 cpu_mips_store_compare(env, arg1); 1126 } 1127 1128 void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1) 1129 { 1130 uint32_t val, old; 1131 1132 old = env->CP0_Status; 1133 cpu_mips_store_status(env, arg1); 1134 val = env->CP0_Status; 1135 1136 if (qemu_loglevel_mask(CPU_LOG_EXEC)) { 1137 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x", 1138 old, old & env->CP0_Cause & CP0Ca_IP_mask, 1139 val, val & env->CP0_Cause & CP0Ca_IP_mask, 1140 env->CP0_Cause); 1141 switch (mips_env_mmu_index(env)) { 1142 case 3: 1143 qemu_log(", ERL\n"); 1144 break; 1145 case MIPS_HFLAG_UM: 1146 qemu_log(", UM\n"); 1147 break; 1148 case MIPS_HFLAG_SM: 1149 qemu_log(", SM\n"); 1150 break; 1151 case MIPS_HFLAG_KM: 1152 qemu_log("\n"); 1153 break; 1154 default: 1155 cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); 1156 break; 1157 } 1158 } 1159 } 

/*
 * MTTC0 variant: write Status of the TC selected by VPEControl.TargTC.
 * A reduced rw_bitmask is used (CU*/RP/MX and low mode bits excluded).
 */
void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
    sync_c0_status(env, other, other_tc);
}

/* IntCtl: only bits [9:5] are writable here. */
void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

/* SRSCtl: only the ESS and PSS shadow-set fields are writable. */
void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}

/* Writing Cause is delegated to the common store routine. */
void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_cause(env, arg1);
}

/* MTTC0 variant: write Cause of the targeted TC. */
void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    cpu_mips_store_cause(other, arg1);
}

/* MFTC0: read EPC of the targeted TC. */
target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}

/* MFTC0: read EBase of the targeted TC. */
target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}

/*
 * Write EBase.  Bits [29:12] plus any implemented WG bit are writable;
 * when WG is being set, the upper address bits become writable as well.
 */
void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
}

/* MTTC0 variant: write EBase of the targeted TC (same masking as above). */
void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
}

/*
 * MFTC0: read ConfigN (N = idx) of the targeted TC.
 * Selects 4 and 5 are reserved and read as 0, as does any other
 * unsupported select.
 */
target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved. */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}

/* Config0: only the low three bits (K0 cacheability field) are writable. */
void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

/*
 * Config2 is effectively read-only here: arg1 is intentionally ignored
 * and only the fixed bits are kept.
 */
void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

/* Config3: only ISAOnExc is writable, and only on microMIPS-capable cores. */
void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ASE_MICROMIPS) {
        env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
                           (arg1 & (1 << CP0C3_ISA_ON_EXC));
    }
}

/* Config4: writable bits are given by the per-CPU rw_bitmask. */
void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
                       (arg1 & env->CP0_Config4_rw_bitmask);
}

/*
 * Config5: writable bits per rw_bitmask.  Since Config5.MI selects the
 * MemoryMapID scheme (no ASID) and Config4.AE widens the ASID field,
 * the cached EntryHi ASID mask and the hflags must be recomputed.
 */
void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
                       (arg1 & env->CP0_Config5_rw_bitmask);
    env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
            0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
    compute_hflags(env);
}

/*
 * LLAddr: the guest-visible value is shifted by the implementation's
 * LLAddr_shift before the writable-bit mask is applied.
 */
void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
}

/*
 * Writable MAAR bits: valid bit (63), the physical-address field limited
 * by PAMask, and the two low attribute bits.
 */
#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)

/* Write the MAAR entry currently selected by MAARI. */
void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
}

/* MTHC0: write the upper 32 bits of the selected MAAR entry. */
void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] =
        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
}

/* Write MAARI (index into the MAAR array). */
void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
{
    int index = arg1 & 0x3f;
    if (index == 0x3f) {
        /*
         * Software may write all ones to INDEX to determine the
         * maximum value supported.
         */
        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
    } else if (index < MIPS_MAAR_MAX) {
        env->CP0_MAARI = index;
    }
    /*
     * Other than the all ones, if the value written is not supported,
     * then INDEX is unchanged from its previous value.
     */
}

/* WatchLo: the low three (condition-enable) bits are forced to zero. */
void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /*
     * Watch exceptions for instructions, data loads, data stores
     * not implemented.
1329 */ 1330 env->CP0_WatchLo[sel] = (arg1 & ~0x7); 1331 } 1332 1333 void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1334 { 1335 uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID); 1336 uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */ 1337 if ((env->CP0_Config5 >> CP0C5_MI) & 1) { 1338 mask |= 0xFFFFFFFF00000000ULL; /* MMID */ 1339 } 1340 env->CP0_WatchHi[sel] = m_bit | (arg1 & mask); 1341 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7); 1342 } 1343 1344 void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1345 { 1346 env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) | 1347 (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL); 1348 } 1349 1350 void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1) 1351 { 1352 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1; 1353 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask); 1354 } 1355 1356 void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1) 1357 { 1358 env->CP0_Framemask = arg1; /* XXX */ 1359 } 1360 1361 void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1) 1362 { 1363 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120); 1364 if (arg1 & (1 << CP0DB_DM)) { 1365 env->hflags |= MIPS_HFLAG_DM; 1366 } else { 1367 env->hflags &= ~MIPS_HFLAG_DM; 1368 } 1369 } 1370 1371 void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1) 1372 { 1373 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1374 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)); 1375 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1376 1377 /* XXX: Might be wrong, check with EJTAG spec. 
     */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_Debug_tcstatus = val;
    } else {
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    }
    other->CP0_Debug = (other->CP0_Debug &
                       ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                       (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

/* Performance0: only the low 11 control bits are writable. */
void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

/*
 * ErrCtl: keep only the WST, SPR and ITC bits (ITC only when an ITC tag
 * device is present).  When ITC is the sole bit set, CACHE instructions
 * address the ITC storage, signalled to the translator via hflags.
 */
void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
{
    int32_t wst = arg1 & (1 << CP0EC_WST);
    int32_t spr = arg1 & (1 << CP0EC_SPR);
    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;

    env->CP0_ErrCtl = wst | spr | itc;

    if (itc && !wst && !spr) {
        env->hflags |= MIPS_HFLAG_ITC_CACHE;
    } else {
        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
    }
}

/* TagLo: fully writable in ITC mode, otherwise masked to 0xFFFFFCF6. */
void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
        /*
         * If CACHE instruction is configured for ITC tags then make all
         * CP0.TagLo bits writable. The actual write to ITC Configuration
         * Tag will take care of the read-only bits.
1415 */ 1416 env->CP0_TagLo = arg1; 1417 } else { 1418 env->CP0_TagLo = arg1 & 0xFFFFFCF6; 1419 } 1420 } 1421 1422 void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1) 1423 { 1424 env->CP0_DataLo = arg1; /* XXX */ 1425 } 1426 1427 void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1) 1428 { 1429 env->CP0_TagHi = arg1; /* XXX */ 1430 } 1431 1432 void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1) 1433 { 1434 env->CP0_DataHi = arg1; /* XXX */ 1435 } 1436 1437 /* MIPS MT functions */ 1438 target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel) 1439 { 1440 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1441 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1442 1443 if (other_tc == other->current_tc) { 1444 return other->active_tc.gpr[sel]; 1445 } else { 1446 return other->tcs[other_tc].gpr[sel]; 1447 } 1448 } 1449 1450 target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel) 1451 { 1452 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1453 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1454 1455 if (other_tc == other->current_tc) { 1456 return other->active_tc.LO[sel]; 1457 } else { 1458 return other->tcs[other_tc].LO[sel]; 1459 } 1460 } 1461 1462 target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel) 1463 { 1464 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1465 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1466 1467 if (other_tc == other->current_tc) { 1468 return other->active_tc.HI[sel]; 1469 } else { 1470 return other->tcs[other_tc].HI[sel]; 1471 } 1472 } 1473 1474 target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel) 1475 { 1476 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1477 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1478 1479 if (other_tc == other->current_tc) { 1480 return other->active_tc.ACX[sel]; 1481 } else { 1482 return other->tcs[other_tc].ACX[sel]; 1483 } 1484 } 1485 1486 target_ulong 
helper_mftdsp(CPUMIPSState *env) 1487 { 1488 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1489 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1490 1491 if (other_tc == other->current_tc) { 1492 return other->active_tc.DSPControl; 1493 } else { 1494 return other->tcs[other_tc].DSPControl; 1495 } 1496 } 1497 1498 void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1499 { 1500 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1501 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1502 1503 if (other_tc == other->current_tc) { 1504 other->active_tc.gpr[sel] = arg1; 1505 } else { 1506 other->tcs[other_tc].gpr[sel] = arg1; 1507 } 1508 } 1509 1510 void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1511 { 1512 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1513 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1514 1515 if (other_tc == other->current_tc) { 1516 other->active_tc.LO[sel] = arg1; 1517 } else { 1518 other->tcs[other_tc].LO[sel] = arg1; 1519 } 1520 } 1521 1522 void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1523 { 1524 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1525 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1526 1527 if (other_tc == other->current_tc) { 1528 other->active_tc.HI[sel] = arg1; 1529 } else { 1530 other->tcs[other_tc].HI[sel] = arg1; 1531 } 1532 } 1533 1534 void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1535 { 1536 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1537 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1538 1539 if (other_tc == other->current_tc) { 1540 other->active_tc.ACX[sel] = arg1; 1541 } else { 1542 other->tcs[other_tc].ACX[sel] = arg1; 1543 } 1544 } 1545 1546 void helper_mttdsp(CPUMIPSState *env, target_ulong arg1) 1547 { 1548 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1549 CPUMIPSState *other = 
mips_cpu_map_tc(env, &other_tc); 1550 1551 if (other_tc == other->current_tc) { 1552 other->active_tc.DSPControl = arg1; 1553 } else { 1554 other->tcs[other_tc].DSPControl = arg1; 1555 } 1556 } 1557 1558 /* MIPS MT functions */ 1559 target_ulong helper_dmt(void) 1560 { 1561 /* TODO */ 1562 return 0; 1563 } 1564 1565 target_ulong helper_emt(void) 1566 { 1567 /* TODO */ 1568 return 0; 1569 } 1570 1571 target_ulong helper_dvpe(CPUMIPSState *env) 1572 { 1573 CPUState *other_cs = first_cpu; 1574 target_ulong prev = env->mvp->CP0_MVPControl; 1575 1576 CPU_FOREACH(other_cs) { 1577 MIPSCPU *other_cpu = MIPS_CPU(other_cs); 1578 /* Turn off all VPEs except the one executing the dvpe. */ 1579 if (&other_cpu->env != env) { 1580 other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); 1581 mips_vpe_sleep(other_cpu); 1582 } 1583 } 1584 return prev; 1585 } 1586 1587 target_ulong helper_evpe(CPUMIPSState *env) 1588 { 1589 CPUState *other_cs = first_cpu; 1590 target_ulong prev = env->mvp->CP0_MVPControl; 1591 1592 CPU_FOREACH(other_cs) { 1593 MIPSCPU *other_cpu = MIPS_CPU(other_cs); 1594 1595 if (&other_cpu->env != env 1596 /* If the VPE is WFI, don't disturb its sleep. */ 1597 && !mips_vpe_is_wfi(other_cpu)) { 1598 /* Enable the VPE. */ 1599 other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); 1600 mips_vpe_wake(other_cpu); /* And wake it up. */ 1601 } 1602 } 1603 return prev; 1604 } 1605 1606 /* R6 Multi-threading */ 1607 target_ulong helper_dvp(CPUMIPSState *env) 1608 { 1609 CPUState *other_cs = first_cpu; 1610 target_ulong prev = env->CP0_VPControl; 1611 1612 if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) { 1613 CPU_FOREACH(other_cs) { 1614 MIPSCPU *other_cpu = MIPS_CPU(other_cs); 1615 /* Turn off all VPs except the one executing the dvp. 
             */
            if (&other_cpu->env != env) {
                mips_vpe_sleep(other_cpu);
            }
        }
        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
    }
    return prev;
}

/*
 * R6 Multi-threading: EVP clears VPControl.DIS and wakes the other VPs
 * (except those merely waiting for an interrupt); returns the previous
 * VPControl value.
 */
target_ulong helper_evp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
                /*
                 * If the VP is WFI, don't disturb its sleep.
                 * Otherwise, wake it up.
                 */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    return prev;
}