1/*
2 * RISC-V translation routines for the RV64F Standard Extension.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/*
 * Fail translation when FP state is disabled in mstatus.FS.
 * Before bailing out, record whether the resulting trap should be a
 * virtual-instruction exception: that is the case when running
 * virtualized with Zfinx enabled.
 */
#define REQUIRE_FPU do {\
    if (ctx->mstatus_fs == EXT_STATUS_DISABLED) {                           \
        ctx->virt_inst_excp = ctx->virt_enabled && ctx->cfg_ptr->ext_zfinx; \
        return false;                                                       \
    }                                                                       \
} while (0)
27
/*
 * Accept the instruction when Zfinx is enabled (FP operands live in the
 * integer register file); otherwise the full F extension is required.
 */
#define REQUIRE_ZFINX_OR_F(ctx) do {\
    if (!ctx->cfg_ptr->ext_zfinx) { \
        REQUIRE_EXT(ctx, RVF); \
    } \
} while (0)
33
/*
 * Compressed FP load/store (c.flw/c.fsw) is legal either via the Zcf
 * extension, or via the combination of both F and C.
 */
#define REQUIRE_ZCF_OR_FC(ctx) do {                     \
    if (!ctx->cfg_ptr->ext_zcf) {                       \
        if (!has_ext(ctx, RVF) || !has_ext(ctx, RVC)) { \
            return false;                               \
        }                                               \
    }                                                   \
} while (0)
41
42static bool trans_flw(DisasContext *ctx, arg_flw *a)
43{
44    TCGv_i64 dest;
45    TCGv addr;
46    MemOp memop = MO_TEUL;
47
48    REQUIRE_FPU;
49    REQUIRE_EXT(ctx, RVF);
50
51    if (ctx->cfg_ptr->ext_zama16b) {
52        memop |= MO_ATOM_WITHIN16;
53    }
54
55    decode_save_opc(ctx, 0);
56    addr = get_address(ctx, a->rs1, a->imm);
57    dest = cpu_fpr[a->rd];
58    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop);
59    gen_nanbox_s(dest, dest);
60
61    mark_fs_dirty(ctx);
62    return true;
63}
64
65static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
66{
67    TCGv addr;
68    MemOp memop = MO_TEUL;
69
70    REQUIRE_FPU;
71    REQUIRE_EXT(ctx, RVF);
72
73    if (ctx->cfg_ptr->ext_zama16b) {
74        memop |= MO_ATOM_WITHIN16;
75    }
76
77    decode_save_opc(ctx, 0);
78    addr = get_address(ctx, a->rs1, a->imm);
79    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
80    return true;
81}
82
/* C.FLW: compressed FLW; requires Zcf or F+C, then defers to trans_flw. */
static bool trans_c_flw(DisasContext *ctx, arg_flw *a)
{
    REQUIRE_ZCF_OR_FC(ctx);
    return trans_flw(ctx, a);
}
88
/* C.FSW: compressed FSW; requires Zcf or F+C, then defers to trans_fsw. */
static bool trans_c_fsw(DisasContext *ctx, arg_fsw *a)
{
    REQUIRE_ZCF_OR_FC(ctx);
    return trans_fsw(ctx, a);
}
94
95static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
96{
97    REQUIRE_FPU;
98    REQUIRE_ZFINX_OR_F(ctx);
99
100    TCGv_i64 dest = dest_fpr(ctx, a->rd);
101    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
102    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
103    TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
104
105    gen_set_rm(ctx, a->rm);
106    gen_helper_fmadd_s(dest, tcg_env, src1, src2, src3);
107    gen_set_fpr_hs(ctx, a->rd, dest);
108    mark_fs_dirty(ctx);
109    return true;
110}
111
112static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
113{
114    REQUIRE_FPU;
115    REQUIRE_ZFINX_OR_F(ctx);
116
117    TCGv_i64 dest = dest_fpr(ctx, a->rd);
118    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
119    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
120    TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
121
122    gen_set_rm(ctx, a->rm);
123    gen_helper_fmsub_s(dest, tcg_env, src1, src2, src3);
124    gen_set_fpr_hs(ctx, a->rd, dest);
125    mark_fs_dirty(ctx);
126    return true;
127}
128
129static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
130{
131    REQUIRE_FPU;
132    REQUIRE_ZFINX_OR_F(ctx);
133
134    TCGv_i64 dest = dest_fpr(ctx, a->rd);
135    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
136    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
137    TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
138
139    gen_set_rm(ctx, a->rm);
140    gen_helper_fnmsub_s(dest, tcg_env, src1, src2, src3);
141    gen_set_fpr_hs(ctx, a->rd, dest);
142    mark_fs_dirty(ctx);
143    return true;
144}
145
146static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
147{
148    REQUIRE_FPU;
149    REQUIRE_ZFINX_OR_F(ctx);
150
151    TCGv_i64 dest = dest_fpr(ctx, a->rd);
152    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
153    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
154    TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
155
156    gen_set_rm(ctx, a->rm);
157    gen_helper_fnmadd_s(dest, tcg_env, src1, src2, src3);
158    gen_set_fpr_hs(ctx, a->rd, dest);
159    mark_fs_dirty(ctx);
160    return true;
161}
162
163static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
164{
165    REQUIRE_FPU;
166    REQUIRE_ZFINX_OR_F(ctx);
167
168    TCGv_i64 dest = dest_fpr(ctx, a->rd);
169    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
170    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
171
172    gen_set_rm(ctx, a->rm);
173    gen_helper_fadd_s(dest, tcg_env, src1, src2);
174    gen_set_fpr_hs(ctx, a->rd, dest);
175    mark_fs_dirty(ctx);
176    return true;
177}
178
179static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
180{
181    REQUIRE_FPU;
182    REQUIRE_ZFINX_OR_F(ctx);
183
184    TCGv_i64 dest = dest_fpr(ctx, a->rd);
185    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
186    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
187
188    gen_set_rm(ctx, a->rm);
189    gen_helper_fsub_s(dest, tcg_env, src1, src2);
190    gen_set_fpr_hs(ctx, a->rd, dest);
191    mark_fs_dirty(ctx);
192    return true;
193}
194
195static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
196{
197    REQUIRE_FPU;
198    REQUIRE_ZFINX_OR_F(ctx);
199
200    TCGv_i64 dest = dest_fpr(ctx, a->rd);
201    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
202    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
203
204    gen_set_rm(ctx, a->rm);
205    gen_helper_fmul_s(dest, tcg_env, src1, src2);
206    gen_set_fpr_hs(ctx, a->rd, dest);
207    mark_fs_dirty(ctx);
208    return true;
209}
210
211static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
212{
213    REQUIRE_FPU;
214    REQUIRE_ZFINX_OR_F(ctx);
215
216    TCGv_i64 dest = dest_fpr(ctx, a->rd);
217    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
218    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
219
220    gen_set_rm(ctx, a->rm);
221    gen_helper_fdiv_s(dest, tcg_env, src1, src2);
222    gen_set_fpr_hs(ctx, a->rd, dest);
223    mark_fs_dirty(ctx);
224    return true;
225}
226
227static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
228{
229    REQUIRE_FPU;
230    REQUIRE_ZFINX_OR_F(ctx);
231
232    TCGv_i64 dest = dest_fpr(ctx, a->rd);
233    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
234
235    gen_set_rm(ctx, a->rm);
236    gen_helper_fsqrt_s(dest, tcg_env, src1);
237    gen_set_fpr_hs(ctx, a->rd, dest);
238    mark_fs_dirty(ctx);
239    return true;
240}
241
/*
 * FSGNJ.S: rd takes rs1's magnitude (bits 30:0) with rs2's sign (bit 31).
 * rs1 == rs2 is the canonical FMV.S encoding and is handled as a move.
 */
static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    if (a->rs1 == a->rs2) { /* FMOV */
        if (!ctx->cfg_ptr->ext_zfinx) {
            /* 'F' register file: re-canonicalize the nanboxing on the way. */
            gen_check_nanbox_s(dest, src1);
        } else {
            /* Zfinx keeps 32-bit FP values sign-extended in GPRs. */
            tcg_gen_ext32s_i64(dest, src1);
        }
    } else { /* FSGNJ */
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

        if (!ctx->cfg_ptr->ext_zfinx) {
            TCGv_i64 rs1 = tcg_temp_new_i64();
            TCGv_i64 rs2 = tcg_temp_new_i64();
            gen_check_nanbox_s(rs1, src1);
            gen_check_nanbox_s(rs2, src2);

            /* This formulation retains the nanboxing of rs2 in normal 'F'. */
            tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31);
        } else {
            /* Low 31 bits from rs1, bit 31 (and above) from rs2. */
            tcg_gen_deposit_i64(dest, src2, src1, 0, 31);
            tcg_gen_ext32s_i64(dest, dest);
        }
    }
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}
276
/*
 * FSGNJN.S: rd takes rs1's magnitude with the INVERSE of rs2's sign bit.
 * rs1 == rs2 is the canonical FNEG.S encoding.
 */
static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
{
    TCGv_i64 rs1, rs2, mask;

    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    rs1 = tcg_temp_new_i64();
    if (!ctx->cfg_ptr->ext_zfinx) {
        /* 'F' register file: canonicalize the nanboxing of rs1 first. */
        gen_check_nanbox_s(rs1, src1);
    } else {
        tcg_gen_mov_i64(rs1, src1);
    }
    if (a->rs1 == a->rs2) { /* FNEG */
        /* Flip only the sign bit (bit 31). */
        tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(31, 1));
    } else {
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
        rs2 = tcg_temp_new_i64();
        if (!ctx->cfg_ptr->ext_zfinx) {
            gen_check_nanbox_s(rs2, src2);
        } else {
            tcg_gen_mov_i64(rs2, src2);
        }

        /*
         * Replace bit 31 in rs1 with inverse in rs2.
         * This formulation retains the nanboxing of rs1.
         */
        mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
        /* rs2 = ~(rs2 | mask): isolates the inverted sign bit of rs2. */
        tcg_gen_nor_i64(rs2, rs2, mask);
        tcg_gen_and_i64(dest, mask, rs1);
        tcg_gen_or_i64(dest, dest, rs2);
    }
    /* signed-extended instead of nanboxing for result if enable zfinx */
    if (ctx->cfg_ptr->ext_zfinx) {
        tcg_gen_ext32s_i64(dest, dest);
    }
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}
321
/*
 * FSGNJX.S: rd takes rs1's magnitude with sign = rs1's sign XOR rs2's sign.
 * rs1 == rs2 is the canonical FABS.S encoding (sign xor itself == 0).
 */
static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
{
    TCGv_i64 rs1, rs2;

    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    rs1 = tcg_temp_new_i64();

    if (!ctx->cfg_ptr->ext_zfinx) {
        /* 'F' register file: canonicalize the nanboxing of rs1 first. */
        gen_check_nanbox_s(rs1, src1);
    } else {
        tcg_gen_mov_i64(rs1, src1);
    }

    if (a->rs1 == a->rs2) { /* FABS */
        /* Clear the sign bit (bit 31). */
        tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(31, 1));
    } else {
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
        rs2 = tcg_temp_new_i64();

        if (!ctx->cfg_ptr->ext_zfinx) {
            gen_check_nanbox_s(rs2, src2);
        } else {
            tcg_gen_mov_i64(rs2, src2);
        }

        /*
         * Xor bit 31 in rs1 with that in rs2.
         * This formulation retains the nanboxing of rs1.
         */
        tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1));
        tcg_gen_xor_i64(dest, rs1, dest);
    }
    /* signed-extended instead of nanboxing for result if enable zfinx */
    if (ctx->cfg_ptr->ext_zfinx) {
        tcg_gen_ext32s_i64(dest, dest);
    }
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}
366
367static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
368{
369    REQUIRE_FPU;
370    REQUIRE_ZFINX_OR_F(ctx);
371
372    TCGv_i64 dest = dest_fpr(ctx, a->rd);
373    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
374    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
375
376    gen_helper_fmin_s(dest, tcg_env, src1, src2);
377    gen_set_fpr_hs(ctx, a->rd, dest);
378    mark_fs_dirty(ctx);
379    return true;
380}
381
382static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
383{
384    REQUIRE_FPU;
385    REQUIRE_ZFINX_OR_F(ctx);
386
387    TCGv_i64 dest = dest_fpr(ctx, a->rd);
388    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
389    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
390
391    gen_helper_fmax_s(dest, tcg_env, src1, src2);
392    gen_set_fpr_hs(ctx, a->rd, dest);
393    mark_fs_dirty(ctx);
394    return true;
395}
396
397static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
398{
399    REQUIRE_FPU;
400    REQUIRE_ZFINX_OR_F(ctx);
401
402    TCGv dest = dest_gpr(ctx, a->rd);
403    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
404
405    gen_set_rm(ctx, a->rm);
406    gen_helper_fcvt_w_s(dest, tcg_env, src1);
407    gen_set_gpr(ctx, a->rd, dest);
408    return true;
409}
410
411static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
412{
413    REQUIRE_FPU;
414    REQUIRE_ZFINX_OR_F(ctx);
415
416    TCGv dest = dest_gpr(ctx, a->rd);
417    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
418
419    gen_set_rm(ctx, a->rm);
420    gen_helper_fcvt_wu_s(dest, tcg_env, src1);
421    gen_set_gpr(ctx, a->rd, dest);
422    return true;
423}
424
/*
 * FMV.X.W: move the raw 32-bit FP bit pattern of rs1 into gpr[rd],
 * sign-extended to XLEN. No FP operation is performed.
 */
static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
{
    /* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
#if defined(TARGET_RISCV64)
    /* RV64: sign-extend the low 32 bits into the 64-bit GPR. */
    tcg_gen_ext32s_tl(dest, src1);
#else
    /* RV32: take the low 32 bits of the 64-bit FP register. */
    tcg_gen_extrl_i64_i32(dest, src1);
#endif

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
442
443static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
444{
445    REQUIRE_FPU;
446    REQUIRE_ZFINX_OR_F(ctx);
447
448    TCGv dest = dest_gpr(ctx, a->rd);
449    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
450    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
451
452    gen_helper_feq_s(dest, tcg_env, src1, src2);
453    gen_set_gpr(ctx, a->rd, dest);
454    return true;
455}
456
457static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
458{
459    REQUIRE_FPU;
460    REQUIRE_ZFINX_OR_F(ctx);
461
462    TCGv dest = dest_gpr(ctx, a->rd);
463    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
464    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
465
466    gen_helper_flt_s(dest, tcg_env, src1, src2);
467    gen_set_gpr(ctx, a->rd, dest);
468    return true;
469}
470
471static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
472{
473    REQUIRE_FPU;
474    REQUIRE_ZFINX_OR_F(ctx);
475
476    TCGv dest = dest_gpr(ctx, a->rd);
477    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
478    TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
479
480    gen_helper_fle_s(dest, tcg_env, src1, src2);
481    gen_set_gpr(ctx, a->rd, dest);
482    return true;
483}
484
485static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
486{
487    REQUIRE_FPU;
488    REQUIRE_ZFINX_OR_F(ctx);
489
490    TCGv dest = dest_gpr(ctx, a->rd);
491    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
492
493    gen_helper_fclass_s(dest, tcg_env, src1);
494    gen_set_gpr(ctx, a->rd, dest);
495    return true;
496}
497
498static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
499{
500    REQUIRE_FPU;
501    REQUIRE_ZFINX_OR_F(ctx);
502
503    TCGv_i64 dest = dest_fpr(ctx, a->rd);
504    TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
505
506    gen_set_rm(ctx, a->rm);
507    gen_helper_fcvt_s_w(dest, tcg_env, src);
508    gen_set_fpr_hs(ctx, a->rd, dest);
509    mark_fs_dirty(ctx);
510    return true;
511}
512
513static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
514{
515    REQUIRE_FPU;
516    REQUIRE_ZFINX_OR_F(ctx);
517
518    TCGv_i64 dest = dest_fpr(ctx, a->rd);
519    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
520
521    gen_set_rm(ctx, a->rm);
522    gen_helper_fcvt_s_wu(dest, tcg_env, src);
523    gen_set_fpr_hs(ctx, a->rd, dest);
524    mark_fs_dirty(ctx);
525    return true;
526}
527
528static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
529{
530    /* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */
531    REQUIRE_FPU;
532    REQUIRE_ZFINX_OR_F(ctx);
533
534    TCGv_i64 dest = dest_fpr(ctx, a->rd);
535    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
536
537    tcg_gen_extu_tl_i64(dest, src);
538    gen_nanbox_s(dest, dest);
539    gen_set_fpr_hs(ctx, a->rd, dest);
540    mark_fs_dirty(ctx);
541    return true;
542}
543
544static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
545{
546    REQUIRE_64BIT(ctx);
547    REQUIRE_FPU;
548    REQUIRE_ZFINX_OR_F(ctx);
549
550    TCGv dest = dest_gpr(ctx, a->rd);
551    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
552
553    gen_set_rm(ctx, a->rm);
554    gen_helper_fcvt_l_s(dest, tcg_env, src1);
555    gen_set_gpr(ctx, a->rd, dest);
556    return true;
557}
558
559static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
560{
561    REQUIRE_64BIT(ctx);
562    REQUIRE_FPU;
563    REQUIRE_ZFINX_OR_F(ctx);
564
565    TCGv dest = dest_gpr(ctx, a->rd);
566    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
567
568    gen_set_rm(ctx, a->rm);
569    gen_helper_fcvt_lu_s(dest, tcg_env, src1);
570    gen_set_gpr(ctx, a->rd, dest);
571    return true;
572}
573
574static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
575{
576    REQUIRE_64BIT(ctx);
577    REQUIRE_FPU;
578    REQUIRE_ZFINX_OR_F(ctx);
579
580    TCGv_i64 dest = dest_fpr(ctx, a->rd);
581    TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
582
583    gen_set_rm(ctx, a->rm);
584    gen_helper_fcvt_s_l(dest, tcg_env, src);
585    gen_set_fpr_hs(ctx, a->rd, dest);
586    mark_fs_dirty(ctx);
587    return true;
588}
589
590static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
591{
592    REQUIRE_64BIT(ctx);
593    REQUIRE_FPU;
594    REQUIRE_ZFINX_OR_F(ctx);
595
596    TCGv_i64 dest = dest_fpr(ctx, a->rd);
597    TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
598
599    gen_set_rm(ctx, a->rm);
600    gen_helper_fcvt_s_lu(dest, tcg_env, src);
601    gen_set_fpr_hs(ctx, a->rd, dest);
602    mark_fs_dirty(ctx);
603    return true;
604}
605