--- vmx-impl.c.inc (6630bc04bccadcf868165ad6bca5a964bb69b067)
+++ vmx-impl.c.inc (21b5f5464f97f68f025c86330146d038d2ee79ad)

/*
 * translate/vmx-impl.c
 *
 * Altivec/VMX translation
 */

/*** Altivec vector extension ***/
/* Altivec registers moves */

static inline TCGv_ptr gen_avr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, tcg_env, avr_full_offset(reg));
    return r;
}

-#define GEN_VR_LDX(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
-    TCGv EA; \
-    TCGv_i64 avr; \
-    if (unlikely(!ctx->altivec_enabled)) { \
-        gen_exception(ctx, POWERPC_EXCP_VPU); \
-        return; \
-    } \
-    gen_set_access_type(ctx, ACCESS_INT); \
-    avr = tcg_temp_new_i64(); \
-    EA = tcg_temp_new(); \
-    gen_addr_reg_index(ctx, EA); \
-    tcg_gen_andi_tl(EA, EA, ~0xf); \
-    /* \
-     * We only need to swap high and low halves. gen_qemu_ld64_i64 \
-     * does necessary 64-bit byteswap already. \
-     */ \
-    if (ctx->le_mode) { \
-        gen_qemu_ld64_i64(ctx, avr, EA); \
-        set_avr64(rD(ctx->opcode), avr, false); \
-        tcg_gen_addi_tl(EA, EA, 8); \
-        gen_qemu_ld64_i64(ctx, avr, EA); \
-        set_avr64(rD(ctx->opcode), avr, true); \
-    } else { \
-        gen_qemu_ld64_i64(ctx, avr, EA); \
-        set_avr64(rD(ctx->opcode), avr, true); \
-        tcg_gen_addi_tl(EA, EA, 8); \
-        gen_qemu_ld64_i64(ctx, avr, EA); \
-        set_avr64(rD(ctx->opcode), avr, false); \
-    } \
-}
+static bool trans_LVX(DisasContext *ctx, arg_X *a)
+{
+    TCGv EA;
+    TCGv_i64 avr;
+    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
+    REQUIRE_VECTOR(ctx);
+    gen_set_access_type(ctx, ACCESS_INT);
+    avr = tcg_temp_new_i64();
+    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+    tcg_gen_andi_tl(EA, EA, ~0xf);
+    /*
+     * We only need to swap high and low halves. gen_qemu_ld64_i64
+     * does necessary 64-bit byteswap already.
+     */
+    gen_qemu_ld64_i64(ctx, avr, EA);
+    set_avr64(a->rt, avr, !ctx->le_mode);
+    tcg_gen_addi_tl(EA, EA, 8);
+    gen_qemu_ld64_i64(ctx, avr, EA);
+    set_avr64(a->rt, avr, ctx->le_mode);
+    return true;
+}

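/*
 * Illustration (not part of either commit): the old macro needed separate
 * big- and little-endian paths only to choose which half of the vector
 * register receives the doubleword loaded first.  The standalone sketch
 * below (the set_half() model is hypothetical, standing in for set_avr64())
 * checks that the new "!ctx->le_mode, then ctx->le_mode" form stores both
 * doublewords into the same halves as the old if/else did.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct vr { uint64_t half[2]; };        /* half[1] models the high doubleword */

static void set_half(struct vr *v, bool high, uint64_t val)
{
    v->half[high] = val;
}

static void old_form(struct vr *v, bool le, uint64_t d0, uint64_t d1)
{
    if (le) {
        set_half(v, false, d0);         /* first load -> low half  */
        set_half(v, true, d1);          /* second load -> high half */
    } else {
        set_half(v, true, d0);          /* first load -> high half */
        set_half(v, false, d1);         /* second load -> low half  */
    }
}

static void new_form(struct vr *v, bool le, uint64_t d0, uint64_t d1)
{
    set_half(v, !le, d0);               /* doubleword loaded from EA     */
    set_half(v, le, d1);                /* doubleword loaded from EA + 8 */
}

int main(void)
{
    for (int le = 0; le <= 1; le++) {
        struct vr a = { { 0, 0 } }, b = { { 0, 0 } };
        old_form(&a, le, 0x1111, 0x2222);
        new_form(&b, le, 0x1111, 0x2222);
        assert(a.half[0] == b.half[0] && a.half[1] == b.half[1]);
    }
    return 0;
}
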
-#define GEN_VR_STX(name, opc2, opc3) \
-static void gen_st##name(DisasContext *ctx) \
-{ \
-    TCGv EA; \
-    TCGv_i64 avr; \
-    if (unlikely(!ctx->altivec_enabled)) { \
-        gen_exception(ctx, POWERPC_EXCP_VPU); \
-        return; \
-    } \
-    gen_set_access_type(ctx, ACCESS_INT); \
-    avr = tcg_temp_new_i64(); \
-    EA = tcg_temp_new(); \
-    gen_addr_reg_index(ctx, EA); \
-    tcg_gen_andi_tl(EA, EA, ~0xf); \
-    /* \
-     * We only need to swap high and low halves. gen_qemu_st64_i64 \
-     * does necessary 64-bit byteswap already. \
-     */ \
-    if (ctx->le_mode) { \
-        get_avr64(avr, rD(ctx->opcode), false); \
-        gen_qemu_st64_i64(ctx, avr, EA); \
-        tcg_gen_addi_tl(EA, EA, 8); \
-        get_avr64(avr, rD(ctx->opcode), true); \
-        gen_qemu_st64_i64(ctx, avr, EA); \
-    } else { \
-        get_avr64(avr, rD(ctx->opcode), true); \
-        gen_qemu_st64_i64(ctx, avr, EA); \
-        tcg_gen_addi_tl(EA, EA, 8); \
-        get_avr64(avr, rD(ctx->opcode), false); \
-        gen_qemu_st64_i64(ctx, avr, EA); \
-    } \
-}
+/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
+QEMU_FLATTEN
+static bool trans_LVXL(DisasContext *ctx, arg_LVXL *a)
+{
+    return trans_LVX(ctx, a);
+}

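/*
 * Note (not from either commit): QEMU_FLATTEN wraps the compiler's "flatten"
 * attribute, so the trans_LVX() call above is expected to be inlined and the
 * lvxl alias adds no extra call overhead.
 */
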
-#define GEN_VR_LVE(name, opc2, opc3, size) \
-static void gen_lve##name(DisasContext *ctx) \
-    { \
-        TCGv EA; \
-        TCGv_ptr rs; \
-        if (unlikely(!ctx->altivec_enabled)) { \
-            gen_exception(ctx, POWERPC_EXCP_VPU); \
-            return; \
-        } \
-        gen_set_access_type(ctx, ACCESS_INT); \
-        EA = tcg_temp_new(); \
-        gen_addr_reg_index(ctx, EA); \
-        if (size > 1) { \
-            tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
-        } \
-        rs = gen_avr_ptr(rS(ctx->opcode)); \
-        gen_helper_lve##name(tcg_env, rs, EA); \
-    }
+static bool trans_STVX(DisasContext *ctx, arg_STVX *a)
+{
+    TCGv EA;
+    TCGv_i64 avr;
+    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
+    REQUIRE_VECTOR(ctx);
+    gen_set_access_type(ctx, ACCESS_INT);
+    avr = tcg_temp_new_i64();
+    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+    tcg_gen_andi_tl(EA, EA, ~0xf);
+    /*
+     * We only need to swap high and low halves. gen_qemu_st64_i64
+     * does necessary 64-bit byteswap already.
+     */
+    get_avr64(avr, a->rt, !ctx->le_mode);
+    gen_qemu_st64_i64(ctx, avr, EA);
+    tcg_gen_addi_tl(EA, EA, 8);
+    get_avr64(avr, a->rt, ctx->le_mode);
+    gen_qemu_st64_i64(ctx, avr, EA);
+    return true;
+}

-#define GEN_VR_STVE(name, opc2, opc3, size) \
-static void gen_stve##name(DisasContext *ctx) \
-    { \
-        TCGv EA; \
-        TCGv_ptr rs; \
-        if (unlikely(!ctx->altivec_enabled)) { \
-            gen_exception(ctx, POWERPC_EXCP_VPU); \
-            return; \
-        } \
-        gen_set_access_type(ctx, ACCESS_INT); \
-        EA = tcg_temp_new(); \
-        gen_addr_reg_index(ctx, EA); \
-        if (size > 1) { \
-            tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
-        } \
-        rs = gen_avr_ptr(rS(ctx->opcode)); \
-        gen_helper_stve##name(tcg_env, rs, EA); \
-    }
+/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
+QEMU_FLATTEN
+static bool trans_STVXL(DisasContext *ctx, arg_STVXL *a)
+{
+    return trans_STVX(ctx, a);
+}
+
+static bool do_ldst_ve_X(DisasContext *ctx, arg_X *a, int size,
+                         void (*helper)(TCGv_env, TCGv_ptr, TCGv))
+{
+    TCGv EA;
+    TCGv_ptr vrt;
+    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
+    REQUIRE_VECTOR(ctx);
+    gen_set_access_type(ctx, ACCESS_INT);
+    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+    if (size > 1) {
+        tcg_gen_andi_tl(EA, EA, ~(size - 1));
+    }
+    vrt = gen_avr_ptr(a->rt);
+    helper(tcg_env, vrt, EA);
+    return true;
+}

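/*
 * Note (not from either commit): the ~(size - 1) mask naturally aligns EA for
 * the element accesses, e.g. size = 4 clears the low two address bits for
 * lvewx/stvewx; the removed GEN_VR_LVE/GEN_VR_STVE macros used the same
 * expression.
 */
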
-GEN_VR_LDX(lvx, 0x07, 0x03);
-/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
-GEN_VR_LDX(lvxl, 0x07, 0x0B);
-
-GEN_VR_LVE(bx, 0x07, 0x00, 1);
-GEN_VR_LVE(hx, 0x07, 0x01, 2);
-GEN_VR_LVE(wx, 0x07, 0x02, 4);
-
-GEN_VR_STX(svx, 0x07, 0x07);
-/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
-GEN_VR_STX(svxl, 0x07, 0x0F);
-
-GEN_VR_STVE(bx, 0x07, 0x04, 1);
-GEN_VR_STVE(hx, 0x07, 0x05, 2);
-GEN_VR_STVE(wx, 0x07, 0x06, 4);
-
+TRANS(LVEBX, do_ldst_ve_X, 1, gen_helper_LVEBX);
+TRANS(LVEHX, do_ldst_ve_X, 2, gen_helper_LVEHX);
+TRANS(LVEWX, do_ldst_ve_X, 4, gen_helper_LVEWX);
+
+TRANS(STVEBX, do_ldst_ve_X, 1, gen_helper_STVEBX);
+TRANS(STVEHX, do_ldst_ve_X, 2, gen_helper_STVEHX);
+TRANS(STVEWX, do_ldst_ve_X, 4, gen_helper_STVEWX);

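/*
 * Note (not from either commit): TRANS is the decodetree glue macro defined
 * with the rest of the trans_* machinery in translate.c; each line above
 * generates a trans_<NAME>() entry point that forwards to do_ldst_ve_X()
 * with the element size and helper named here, replacing the removed
 * GEN_VR_LVE/GEN_VR_STVE instantiations.
 */
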
static void gen_mfvscr(DisasContext *ctx)
{
    TCGv_i32 t;
    TCGv_i64 avr;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

[... 310 unchanged lines hidden ...]

/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
-static void trans_lvsl(DisasContext *ctx)
-{
-    int VT = rD(ctx->opcode);
-    TCGv_i64 result = tcg_temp_new_i64();
-    TCGv_i64 sh = tcg_temp_new_i64();
-    TCGv EA = tcg_temp_new();
-
-    /* Get sh(from description) by anding EA with 0xf. */
-    gen_addr_reg_index(ctx, EA);
-    tcg_gen_extu_tl_i64(sh, EA);
-    tcg_gen_andi_i64(sh, sh, 0xfULL);
-
-    /*
-     * Create bytes sh:sh+7 of X(from description) and place them in
-     * higher doubleword of vD.
-     */
-    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
-    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
-    set_avr64(VT, result, true);
-    /*
-     * Create bytes sh+8:sh+15 of X(from description) and place them in
-     * lower doubleword of vD.
-     */
-    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
-    set_avr64(VT, result, false);
-}
+static bool trans_LVSL(DisasContext *ctx, arg_LVSL *a)
+{
+    TCGv_i64 result = tcg_temp_new_i64();
+    TCGv_i64 sh = tcg_temp_new_i64();
+    TCGv EA = tcg_temp_new();
+
+    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
+    REQUIRE_VECTOR(ctx);
+
+    /* Get sh(from description) by anding EA with 0xf. */
+    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+    tcg_gen_extu_tl_i64(sh, EA);
+    tcg_gen_andi_i64(sh, sh, 0xfULL);
+
+    /*
+     * Create bytes sh:sh+7 of X(from description) and place them in
+     * higher doubleword of vD.
+     */
+    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
+    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
+    set_avr64(a->rt, result, true);
+    /*
+     * Create bytes sh+8:sh+15 of X(from description) and place them in
+     * lower doubleword of vD.
+     */
+    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
+    set_avr64(a->rt, result, false);
+    return true;
+}
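
/*
 * Illustration (not part of either commit): multiplying sh by
 * 0x0101010101010101 broadcasts sh into every byte, so the two additions
 * above yield bytes sh..sh+7 and sh+8..sh+15 of the 0x00..0x1F sequence
 * (no carry crosses a byte, since sh + 15 <= 30).  A standalone check:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (uint64_t sh = 0; sh < 16; sh++) {
        uint64_t bcast = sh * 0x0101010101010101ULL;
        uint64_t hi = bcast + 0x0001020304050607ULL;
        uint64_t lo = bcast + 0x08090a0b0c0d0e0fULL;
        for (int i = 0; i < 8; i++) {
            assert(((hi >> (56 - 8 * i)) & 0xff) == sh + i);      /* bytes sh..sh+7    */
            assert(((lo >> (56 - 8 * i)) & 0xff) == sh + 8 + i);  /* bytes sh+8..sh+15 */
        }
    }
    return 0;
}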

/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
-static void trans_lvsr(DisasContext *ctx)
-{
-    int VT = rD(ctx->opcode);
-    TCGv_i64 result = tcg_temp_new_i64();
-    TCGv_i64 sh = tcg_temp_new_i64();
-    TCGv EA = tcg_temp_new();
-
-
-    /* Get sh(from description) by anding EA with 0xf. */
-    gen_addr_reg_index(ctx, EA);
-    tcg_gen_extu_tl_i64(sh, EA);
-    tcg_gen_andi_i64(sh, sh, 0xfULL);
-
-    /*
-     * Create bytes (16-sh):(23-sh) of X(from description) and place them in
-     * higher doubleword of vD.
-     */
-    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
-    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
-    set_avr64(VT, result, true);
-    /*
-     * Create bytes (24-sh):(32-sh) of X(from description) and place them in
-     * lower doubleword of vD.
-     */
-    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
-    set_avr64(VT, result, false);
-}
+static bool trans_LVSR(DisasContext *ctx, arg_LVSR *a)
+{
+    TCGv_i64 result = tcg_temp_new_i64();
+    TCGv_i64 sh = tcg_temp_new_i64();
+    TCGv EA = tcg_temp_new();
+
+    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
+    REQUIRE_VECTOR(ctx);
+
+    /* Get sh(from description) by anding EA with 0xf. */
+    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
+    tcg_gen_extu_tl_i64(sh, EA);
+    tcg_gen_andi_i64(sh, sh, 0xfULL);
+
+    /*
+     * Create bytes (16-sh):(23-sh) of X(from description) and place them in
+     * higher doubleword of vD.
+     */
+    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
+    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
+    set_avr64(a->rt, result, true);
+    /*
+     * Create bytes (24-sh):(32-sh) of X(from description) and place them in
+     * lower doubleword of vD.
+     */
+    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
+    set_avr64(a->rt, result, false);
+    return true;
+}
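
/*
 * Illustration (not part of either commit): lvsr subtracts the broadcast sh
 * instead.  For sh = 3, the high doubleword is 0x1011121314151617 -
 * 0x0303030303030303 = 0x0d0e0f1011121314 and the low one is
 * 0x18191a1b1c1d1e1f - 0x0303030303030303 = 0x15161718191a1b1c, i.e. bytes
 * 13..28 (16-sh through 31-sh) of the 0x00..0x1F sequence.  No borrow crosses
 * a byte because every constant byte is at least 0x10 >= sh.
 */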

/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shifting left 128 bit value of vA by value specified in bits 125-127 of vB.
 * Lowest 3 bits in each byte element of register vB must be identical or
 * result is undefined.

[... 622 unchanged lines hidden ...]

GEN_VXFORM_HETRO(vextublx, 6, 24)
GEN_VXFORM_HETRO(vextuhlx, 6, 25)
GEN_VXFORM_HETRO(vextuwlx, 6, 26)
GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
                vextuwlx, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30)
-GEN_VXFORM_TRANS(lvsl, 6, 31)
-GEN_VXFORM_TRANS(lvsr, 6, 32)
GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
                vextuwrx, PPC_NONE, PPC2_ISA300)

#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
    { \
        TCGv_ptr ra, rb, rd; \
        if (unlikely(!ctx->altivec_enabled)) { \

[... 2189 unchanged lines hidden ...]

TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ)
TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ)

#undef DIVS32
#undef DIVU32
#undef DIVS64
#undef DIVU64

-#undef GEN_VR_LDX
-#undef GEN_VR_STX
-#undef GEN_VR_LVE
-#undef GEN_VR_STVE
-
#undef GEN_VX_LOGICAL
#undef GEN_VX_LOGICAL_207
#undef GEN_VXFORM
#undef GEN_VXFORM_207
#undef GEN_VXFORM_DUAL
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED

#undef GEN_BCD2