/*
 * RISC-V translation routines for the RV64A Standard Extension.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define REQUIRE_A_OR_ZAAMO(ctx) do {                      \
    if (!ctx->cfg_ptr->ext_zaamo && !has_ext(ctx, RVA)) { \
        return false;                                     \
    }                                                     \
} while (0)

#define REQUIRE_A_OR_ZALRSC(ctx) do {                       \
    if (!ctx->cfg_ptr->ext_zalrsc && !has_ext(ctx, RVA)) {  \
        return false;                                       \
    }                                                       \
} while (0)

static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
    TCGv src1;

    decode_save_opc(ctx);
    src1 = get_address(ctx, a->rs1, 0);
    if (a->rl) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
    /*
     * TSO defines AMOs as acquire+release-RCsc, but does not define LR/SC as
     * AMOs.  Instead treat them like loads.
     */
    if (a->aq || ctx->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }

    /* Put addr in load_res, data in load_val. */
    tcg_gen_mov_tl(load_res, src1);
    gen_set_gpr(ctx, a->rd, load_val);

    return true;
}

static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
    TCGv dest, src1, src2;
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    decode_save_opc(ctx);
    src1 = get_address(ctx, a->rs1, 0);
    tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

    /*
     * Note that the TCG atomic primitives are SC,
     * so we can ignore AQ/RL along this path.
     */
    dest = dest_gpr(ctx, a->rd);
    src2 = get_gpr(ctx, a->rs2, EXT_NONE);
    tcg_gen_atomic_cmpxchg_tl(dest, load_res, load_val, src2,
                              ctx->mem_idx, mop);
    tcg_gen_setcond_tl(TCG_COND_NE, dest, dest, load_val);
    gen_set_gpr(ctx, a->rd, dest);
    tcg_gen_br(l2);

    gen_set_label(l1);
    /*
     * Address comparison failure.  However, we still need to
     * provide the memory barrier implied by AQ/RL/TSO.
     */
    TCGBar bar_strl = (ctx->ztso || a->rl) ? TCG_BAR_STRL : 0;
    tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + bar_strl);
    gen_set_gpr(ctx, a->rd, tcg_constant_tl(1));

    gen_set_label(l2);
    /*
     * Clear the load reservation, since an SC must fail if there is
     * an SC to any address, in between an LR and SC pair.
     */
    tcg_gen_movi_tl(load_res, -1);

    return true;
}
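
/*
 * For reference, the guest-side idiom the two helpers above implement is
 * the canonical LR/SC retry loop from the RISC-V unprivileged spec (the
 * register choices below are illustrative only, not taken from this file):
 *
 *   retry:
 *     lr.w  t0, (a0)        # load-reserved: record address and data
 *     sc.w  t1, t2, (a0)    # store-conditional: t1 == 0 on success
 *     bnez  t1, retry       # reservation was lost, try again
 *
 * gen_lr() records the reserved address in load_res and the loaded data
 * in load_val; gen_sc() consumes both and unconditionally clears the
 * reservation, so a second SC without a fresh LR always fails.
 */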

static bool gen_amo(DisasContext *ctx, arg_atomic *a,
                    void (*func)(TCGv, TCGv, TCGv, TCGArg, MemOp),
                    MemOp mop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);

    decode_save_opc(ctx);
    src1 = get_address(ctx, a->rs1, 0);
    func(dest, src1, src2, ctx->mem_idx, mop);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
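
/*
 * The trans_* functions below are the hooks called by the generated
 * decoder: each checks that the required extension is present (A, or its
 * Zalrsc/Zaamo subset) and forwards to gen_lr/gen_sc/gen_amo with the
 * matching TCG atomic op.  MO_ALIGN enforces the natural alignment the
 * A extension requires; MO_TESL selects a sign-extended 32-bit access
 * and MO_TEUQ a 64-bit one.
 */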

static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
{
    REQUIRE_A_OR_ZALRSC(ctx);
    return gen_lr(ctx, a, (MO_ALIGN | MO_TESL));
}

static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
{
    REQUIRE_A_OR_ZALRSC(ctx);
    return gen_sc(ctx, a, (MO_ALIGN | MO_TESL));
}

static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
{
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZALRSC(ctx);
    return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ);
}

static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZALRSC(ctx);
    return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
}

static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_A_OR_ZAAMO(ctx);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
}