translate.c: diff from a76779ee3b1291b2e29a04229299545a0348160f to c03a0fd15cb0cd694240a68964be630dd3232aca
 /*
    SPARC translation

    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
    Copyright (C) 2003-2005 Fabrice Bellard

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public

--- 2148 unchanged lines hidden ---

 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                               TCGv_i32 asi, TCGv_i32 mop)
 {
     g_assert_not_reached();
 }
 #endif

-static void __attribute__((unused))
-gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, MemOp memop)
+static void gen_ld_asi0(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
 {
-    DisasASI da = get_asi(dc, insn, memop);
-
-    switch (da.type) {
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;
     case GET_ASI_DTWINX: /* Reserved for ldda. */
         gen_exception(dc, TT_ILL_INSN);
         break;
     case GET_ASI_DIRECT:
-        gen_address_mask(dc, addr);
-        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
+        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
         break;
     default:
         {
-            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
+            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

             save_state(dc);
 #ifdef TARGET_SPARC64
             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
 #else
             {
                 TCGv_i64 t64 = tcg_temp_new_i64();
                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                 tcg_gen_trunc_i64_tl(dst, t64);
             }
 #endif
         }
         break;
     }
 }

 static void __attribute__((unused))
-gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, MemOp memop)
+gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, MemOp memop)
 {
     DisasASI da = get_asi(dc, insn, memop);

-    switch (da.type) {
+    gen_address_mask(dc, addr);
+    gen_ld_asi0(dc, &da, dst, addr);
+}
+
+static void gen_st_asi0(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
+{
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;
+
     case GET_ASI_DTWINX: /* Reserved for stda. */
-#ifndef TARGET_SPARC64
-        gen_exception(dc, TT_ILL_INSN);
-        break;
-#else
-        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
+        if (TARGET_LONG_BITS == 32) {
+            gen_exception(dc, TT_ILL_INSN);
+            break;
+        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
             /* Pre OpenSPARC CPUs don't have these */
             gen_exception(dc, TT_ILL_INSN);
-            return;
+            break;
         }
-        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
-         * are ST_BLKINIT_ ASIs */
-#endif
+        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
         /* fall through */
+
     case GET_ASI_DIRECT:
-        gen_address_mask(dc, addr);
-        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
+        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
         break;
-#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+
     case GET_ASI_BCOPY:
+        assert(TARGET_LONG_BITS == 32);
         /* Copy 32 bytes from the address in SRC to ADDR. */
         /* ??? The original qemu code suggests 4-byte alignment, dropping
            the low bits, but the only place I can see this used is in the
            Linux kernel with 32 byte alignment, which would make more sense
            as a cacheline-style operation. */
         {
             TCGv saddr = tcg_temp_new();
             TCGv daddr = tcg_temp_new();
             TCGv four = tcg_constant_tl(4);
             TCGv_i32 tmp = tcg_temp_new_i32();
             int i;

             tcg_gen_andi_tl(saddr, src, -4);
             tcg_gen_andi_tl(daddr, addr, -4);
             for (i = 0; i < 32; i += 4) {
                 /* Since the loads and stores are paired, allow the
                    copy to happen in the host endianness. */
-                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
-                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
+                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
+                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                 tcg_gen_add_tl(saddr, saddr, four);
                 tcg_gen_add_tl(daddr, daddr, four);
             }
         }
         break;
-#endif
+
     default:
         {
-            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
+            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

             save_state(dc);
 #ifdef TARGET_SPARC64
             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
 #else
             {
                 TCGv_i64 t64 = tcg_temp_new_i64();
                 tcg_gen_extu_tl_i64(t64, src);

--- 4 unchanged lines hidden ---

             /* A write to a TLB register may alter page maps. End the TB. */
             dc->npc = DYNAMIC_PC;
         }
         break;
     }
 }

 static void __attribute__((unused))
-gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn)
+gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, MemOp memop)
 {
-    DisasASI da = get_asi(dc, insn, MO_TEUL);
+    DisasASI da = get_asi(dc, insn, memop);

-    switch (da.type) {
+    gen_address_mask(dc, addr);
+    gen_st_asi0(dc, &da, src, addr);
+}
+
+static void gen_swap_asi0(DisasContext *dc, DisasASI *da,
+                          TCGv dst, TCGv src, TCGv addr)
+{
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;
     case GET_ASI_DIRECT:
-        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
+        gen_swap(dc, dst, src, addr, da->mem_idx, da->memop);
         break;
     default:
         /* ??? Should be DAE_invalid_asi. */
         gen_exception(dc, TT_DATA_ACCESS);
         break;
     }
 }

 static void __attribute__((unused))
-gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd)
+gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn)
 {
     DisasASI da = get_asi(dc, insn, MO_TEUL);
-    TCGv oldv;

-    switch (da.type) {
+    gen_address_mask(dc, addr);
+    gen_swap_asi0(dc, &da, dst, src, addr);
+}
+
+static void gen_cas_asi0(DisasContext *dc, DisasASI *da,
+                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
+{
+    switch (da->type) {
     case GET_ASI_EXCP:
         return;
     case GET_ASI_DIRECT:
-        oldv = tcg_temp_new();
-        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
-                                  da.mem_idx, da.memop | MO_ALIGN);
-        gen_store_gpr(dc, rd, oldv);
+        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
+                                  da->mem_idx, da->memop | MO_ALIGN);
         break;
     default:
         /* ??? Should be DAE_invalid_asi. */
         gen_exception(dc, TT_DATA_ACCESS);
         break;
     }
 }

 static void __attribute__((unused))
-gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
+gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd)
 {
-    DisasASI da = get_asi(dc, insn, MO_UB);
+    DisasASI da = get_asi(dc, insn, MO_TEUL);
+    TCGv oldv = gen_dest_gpr(dc, rd);
+    TCGv newv = gen_load_gpr(dc, rd);

-    switch (da.type) {
+    gen_address_mask(dc, addr);
+    gen_cas_asi0(dc, &da, oldv, newv, cmpv, addr);
+    gen_store_gpr(dc, rd, oldv);
+}
+
+static void __attribute__((unused))
+gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd)
+{
+    DisasASI da = get_asi(dc, insn, MO_TEUQ);
+    TCGv oldv = gen_dest_gpr(dc, rd);
+    TCGv newv = gen_load_gpr(dc, rd);
+
+    gen_address_mask(dc, addr);
+    gen_cas_asi0(dc, &da, oldv, newv, cmpv, addr);
+    gen_store_gpr(dc, rd, oldv);
+}
+
+static void gen_ldstub_asi0(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
+{
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;
     case GET_ASI_DIRECT:
-        gen_ldstub(dc, dst, addr, da.mem_idx);
+        gen_ldstub(dc, dst, addr, da->mem_idx);
         break;
     default:
         /* ??? In theory, this should be raise DAE_invalid_asi.
            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
             gen_helper_exit_atomic(tcg_env);
         } else {
-            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
             TCGv_i64 s64, t64;

             save_state(dc);
             t64 = tcg_temp_new_i64();
             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

             s64 = tcg_constant_i64(0xff);

--- 4 unchanged lines hidden ---

             /* End the TB. */
             dc->npc = DYNAMIC_PC;
         }
         break;
     }
 }

 static void __attribute__((unused))
+gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
+{
+    DisasASI da = get_asi(dc, insn, MO_UB);
+
+    gen_address_mask(dc, addr);
+    gen_ldstub_asi0(dc, &da, dst, addr);
+}
+
+static void __attribute__((unused))
 gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
 {
     DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
     TCGv_i32 d32;
     TCGv_i64 d64;

     switch (da.type) {
     case GET_ASI_EXCP:

--- 175 unchanged lines hidden ---

         /* According to the table in the UA2011 manual, the only
            other asis that are valid for ldfa/lddfa/ldqfa are
            the PST* asis, which aren't currently handled. */
         gen_exception(dc, TT_ILL_INSN);
         break;
     }
 }

-static void __attribute__((unused))
-gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+static void gen_ldda_asi0(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
 {
-    DisasASI da = get_asi(dc, insn, MO_TEUQ);
     TCGv hi = gen_dest_gpr(dc, rd);
     TCGv lo = gen_dest_gpr(dc, rd + 1);

-    switch (da.type) {
+    switch (da->type) {
     case GET_ASI_EXCP:
         return;

     case GET_ASI_DTWINX:
         assert(TARGET_LONG_BITS == 64);
-        gen_address_mask(dc, addr);
-        tcg_gen_qemu_ld_tl(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+        tcg_gen_qemu_ld_tl(hi, addr, da->mem_idx, da->memop | MO_ALIGN_16);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_ld_tl(lo, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_ld_tl(lo, addr, da->mem_idx, da->memop);
         break;

     case GET_ASI_DIRECT:
         {
             TCGv_i64 tmp = tcg_temp_new_i64();

-            gen_address_mask(dc, addr);
-            tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
+            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

             /* Note that LE ldda acts as if each 32-bit register
                result is byte swapped. Having just performed one
                64-bit bswap, we need now to swap the writebacks. */
-            if ((da.memop & MO_BSWAP) == MO_TE) {
+            if ((da->memop & MO_BSWAP) == MO_TE) {
                 tcg_gen_extr_i64_tl(lo, hi, tmp);
             } else {
                 tcg_gen_extr_i64_tl(hi, lo, tmp);
             }
         }
         break;

     default:
         /* ??? In theory we've handled all of the ASIs that are valid
            for ldda, and this should raise DAE_invalid_asi. However,
            real hardware allows others. This can be seen with e.g.
            FreeBSD 10.3 wrt ASI_IC_TAG. */
         {
-            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
             TCGv_i64 tmp = tcg_temp_new_i64();

             save_state(dc);
             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

             /* See above. */
-            if ((da.memop & MO_BSWAP) == MO_TE) {
+            if ((da->memop & MO_BSWAP) == MO_TE) {
                 tcg_gen_extr_i64_tl(lo, hi, tmp);
             } else {
                 tcg_gen_extr_i64_tl(hi, lo, tmp);
             }
         }
         break;
     }

     gen_store_gpr(dc, rd, hi);
     gen_store_gpr(dc, rd + 1, lo);
 }

 static void __attribute__((unused))
-gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, int insn, int rd)
+gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
 {
     DisasASI da = get_asi(dc, insn, MO_TEUQ);
+
+    gen_address_mask(dc, addr);
+    gen_ldda_asi0(dc, &da, addr, rd);
+}
+
+static void gen_stda_asi0(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
+{
+    TCGv hi = gen_load_gpr(dc, rd);
     TCGv lo = gen_load_gpr(dc, rd + 1);

-    switch (da.type) {
+    switch (da->type) {
     case GET_ASI_EXCP:
         break;

     case GET_ASI_DTWINX:
         assert(TARGET_LONG_BITS == 64);
-        gen_address_mask(dc, addr);
-        tcg_gen_qemu_st_tl(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+        tcg_gen_qemu_st_tl(hi, addr, da->mem_idx, da->memop | MO_ALIGN_16);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st_tl(lo, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_st_tl(lo, addr, da->mem_idx, da->memop);
         break;

     case GET_ASI_DIRECT:
         {
             TCGv_i64 t64 = tcg_temp_new_i64();

             /* Note that LE stda acts as if each 32-bit register result is
                byte swapped. We will perform one 64-bit LE store, so now
                we must swap the order of the construction. */
-            if ((da.memop & MO_BSWAP) == MO_TE) {
+            if ((da->memop & MO_BSWAP) == MO_TE) {
                 tcg_gen_concat_tl_i64(t64, lo, hi);
             } else {
                 tcg_gen_concat_tl_i64(t64, hi, lo);
             }
-            gen_address_mask(dc, addr);
-            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
+            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
         }
         break;

     case GET_ASI_BFILL:
         assert(TARGET_LONG_BITS == 32);
         /* Store 32 bytes of T64 to ADDR. */
         /* ??? The original qemu code suggests 8-byte alignment, dropping
            the low bits, but the only place I can see this used is in the
            Linux kernel with 32 byte alignment, which would make more sense
            as a cacheline-style operation. */
         {
             TCGv_i64 t64 = tcg_temp_new_i64();
             TCGv d_addr = tcg_temp_new();
             TCGv eight = tcg_constant_tl(8);
             int i;

             tcg_gen_concat_tl_i64(t64, lo, hi);
             tcg_gen_andi_tl(d_addr, addr, -8);
             for (i = 0; i < 32; i += 8) {
-                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
+                tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
                 tcg_gen_add_tl(d_addr, d_addr, eight);
             }
         }
         break;

     default:
         /* ??? In theory we've handled all of the ASIs that are valid
            for stda, and this should raise DAE_invalid_asi. */
         {
-            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
             TCGv_i64 t64 = tcg_temp_new_i64();

             /* See above. */
-            if ((da.memop & MO_BSWAP) == MO_TE) {
+            if ((da->memop & MO_BSWAP) == MO_TE) {
                 tcg_gen_concat_tl_i64(t64, lo, hi);
             } else {
                 tcg_gen_concat_tl_i64(t64, hi, lo);
             }

             save_state(dc);
             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
         }
         break;
     }
 }

 static void __attribute__((unused))
-gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd)
+gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, int insn, int rd)
 {
     DisasASI da = get_asi(dc, insn, MO_TEUQ);
-    TCGv oldv;

-    switch (da.type) {
-    case GET_ASI_EXCP:
-        return;
-    case GET_ASI_DIRECT:
-        oldv = tcg_temp_new();
-        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
-                                  da.mem_idx, da.memop | MO_ALIGN);
-        gen_store_gpr(dc, rd, oldv);
-        break;
-    default:
-        /* ??? Should be DAE_invalid_asi. */
-        gen_exception(dc, TT_DATA_ACCESS);
-        break;
-    }
+    gen_address_mask(dc, addr);
+    gen_stda_asi0(dc, &da, addr, rd);
 }

 static TCGv get_src1(DisasContext *dc, unsigned int insn)
 {
     unsigned int rs1 = GET_FIELD(insn, 13, 17);
     return gen_load_gpr(dc, rs1);
 }

--- 3242 unchanged lines hidden ---
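
Every hunk above follows the same pattern: the body of gen_X_asi(dc, ..., insn, memop) moves into a gen_X_asi0(dc, DisasASI *da, ...) that takes an ASI descriptor precomputed by the caller, gen_address_mask() is hoisted into a thin wrapper that keeps the old insn-based signature, and the wrapper just decodes and delegates. A minimal sketch of how a caller that has already decoded its operands might use the split directly; trans_LDA() and its argument list are hypothetical and not part of this patch, while get_asi(), gen_address_mask() and gen_ld_asi0() are the functions shown in the diff:

/* Illustrative sketch only: trans_LDA() is a hypothetical caller,
   not part of this patch.  It reuses the helpers shown above. */
static bool trans_LDA(DisasContext *dc, int insn, MemOp memop,
                      TCGv dst, TCGv addr)
{
    DisasASI da = get_asi(dc, insn, memop);   /* decode the ASI once */

    gen_address_mask(dc, addr);               /* masking is now the caller's job */
    gen_ld_asi0(dc, &da, dst, addr);          /* shared body, no re-decode */
    return true;
}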