/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch emulation of Linux signals
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"

#include "target/loongarch/internals.h"
#include "target/loongarch/vec.h"

/* FP context was used */
#define SC_USED_FP              (1 << 0)

struct target_sigcontext {
    uint64_t sc_pc;
    uint64_t sc_regs[32];
    uint32_t sc_flags;
    uint64_t sc_extcontext[0] QEMU_ALIGNED(16);
};

#define FPU_CTX_MAGIC           0x46505501
#define FPU_CTX_ALIGN           8
struct target_fpu_context {
    uint64_t regs[32];
    uint64_t fcc;
    uint32_t fcsr;
} QEMU_ALIGNED(FPU_CTX_ALIGN);

#define CONTEXT_INFO_ALIGN      16
struct target_sctx_info {
    uint32_t magic;
    uint32_t size;
    uint64_t padding;
} QEMU_ALIGNED(CONTEXT_INFO_ALIGN);

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ptr tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
    struct target_sigcontext tuc_mcontext;
};

struct target_rt_sigframe {
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};

/*
 * These two structures are not present in guest memory; they are private
 * to the signal implementation, but are largely copied from the kernel's
 * signal implementation.
 */
struct ctx_layout {
    void *haddr;
    abi_ptr gaddr;
    unsigned int size;
};

struct extctx_layout {
    unsigned int size;
    unsigned int flags;
    struct ctx_layout fpu;
    struct ctx_layout end;
};

/* Reserve space for one extended-context record, growing the stack down. */
static abi_ptr extframe_alloc(struct extctx_layout *extctx,
                              struct ctx_layout *sctx, unsigned size,
                              unsigned align, abi_ptr orig_sp)
{
    abi_ptr sp = orig_sp;

    sp -= sizeof(struct target_sctx_info) + size;
    align = MAX(align, CONTEXT_INFO_ALIGN);
    sp = ROUND_DOWN(sp, align);
    sctx->gaddr = sp;

    size = orig_sp - sp;
    sctx->size = size;
    extctx->size += size;

    return sp;
}
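
/*
 * Each extended-context record on the guest stack is a target_sctx_info
 * header (magic, size) immediately followed by its payload; a record with
 * magic == 0 terminates the list.  setup_extcontext() below lays the
 * records out by growing down from the signal stack pointer.
 */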

static abi_ptr setup_extcontext(struct extctx_layout *extctx, abi_ptr sp)
{
    memset(extctx, 0, sizeof(struct extctx_layout));

    /* Grow down, alloc "end" context info first. */
    sp = extframe_alloc(extctx, &extctx->end, 0, CONTEXT_INFO_ALIGN, sp);

    /*
     * For QEMU there is no lazy FP context switch, so the FP context
     * is always present.
     */
    extctx->flags = SC_USED_FP;
    sp = extframe_alloc(extctx, &extctx->fpu,
                        sizeof(struct target_fpu_context), FPU_CTX_ALIGN, sp);

    return sp;
}

static void setup_sigframe(CPULoongArchState *env,
                           struct target_sigcontext *sc,
                           struct extctx_layout *extctx)
{
    struct target_sctx_info *info;
    struct target_fpu_context *fpu_ctx;
    int i;

    __put_user(extctx->flags, &sc->sc_flags);
    __put_user(env->pc, &sc->sc_pc);
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->gpr[i], &sc->sc_regs[i]);
    }

    /*
     * Set FPU context.
     */
    info = extctx->fpu.haddr;
    __put_user(FPU_CTX_MAGIC, &info->magic);
    __put_user(extctx->fpu.size, &info->size);

    fpu_ctx = (struct target_fpu_context *)(info + 1);
    for (i = 0; i < 32; ++i) {
        __put_user(env->fpr[i].vreg.D(0), &fpu_ctx->regs[i]);
    }
    __put_user(read_fcc(env), &fpu_ctx->fcc);
    __put_user(env->fcsr0, &fpu_ctx->fcsr);

    /*
     * Set end context.
     */
    info = extctx->end.haddr;
    __put_user(0, &info->magic);
    __put_user(extctx->end.size, &info->size);
}

/*
 * Walk the extended-context records that follow the rt_sigframe and
 * record where each recognised record lives; stop at the terminating
 * record (magic == 0), fail on anything unknown or malformed.
 */
static bool parse_extcontext(struct extctx_layout *extctx, abi_ptr frame)
{
    memset(extctx, 0, sizeof(*extctx));

    while (1) {
        uint32_t magic, size;

        if (get_user_u32(magic, frame) || get_user_u32(size, frame + 4)) {
            return false;
        }

        switch (magic) {
        case 0: /* END */
            extctx->end.gaddr = frame;
            extctx->end.size = size;
            extctx->size += size;
            return true;

        case FPU_CTX_MAGIC:
            if (size < (sizeof(struct target_sctx_info) +
                        sizeof(struct target_fpu_context))) {
                return false;
            }
            extctx->fpu.gaddr = frame;
            extctx->fpu.size = size;
            extctx->size += size;
            break;

        default:
            return false;
        }

        frame += size;
    }
}

static void restore_sigframe(CPULoongArchState *env,
                             struct target_sigcontext *sc,
                             struct extctx_layout *extctx)
{
    int i;

    __get_user(env->pc, &sc->sc_pc);
    for (i = 1; i < 32; ++i) {
        __get_user(env->gpr[i], &sc->sc_regs[i]);
    }

    if (extctx->fpu.haddr) {
        struct target_fpu_context *fpu_ctx =
            extctx->fpu.haddr + sizeof(struct target_sctx_info);
        uint64_t fcc;

        for (i = 0; i < 32; ++i) {
            __get_user(env->fpr[i].vreg.D(0), &fpu_ctx->regs[i]);
        }
        __get_user(fcc, &fpu_ctx->fcc);
        write_fcc(env, fcc);
        __get_user(env->fcsr0, &fpu_ctx->fcsr);
        restore_fp_status(env);
    }
}
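
/*
 * The complete signal frame built below sits on the guest stack roughly
 * as follows, from low to high addresses (alignment padding omitted):
 *
 *   frame_addr -> struct target_rt_sigframe         (siginfo + ucontext)
 *                 sctx_info + target_fpu_context    (FPU_CTX_MAGIC record)
 *                 sctx_info                         (terminator, magic == 0)
 *                 <- original sp, rounded down to 16 bytes
 */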

/*
 * Determine which stack to use.
 */
static abi_ptr get_sigframe(struct target_sigaction *ka,
                            CPULoongArchState *env,
                            struct extctx_layout *extctx)
{
    abi_ulong sp;

    sp = target_sigsp(get_sp_from_cpustate(env), ka);
    sp = ROUND_DOWN(sp, 16);
    sp = setup_extcontext(extctx, sp);
    sp -= sizeof(struct target_rt_sigframe);

    assert(QEMU_IS_ALIGNED(sp, 16));

    return sp;
}

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPULoongArchState *env)
{
    struct target_rt_sigframe *frame;
    struct extctx_layout extctx;
    abi_ptr frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, &extctx);
    trace_user_setup_rt_frame(env, frame_addr);

    frame = lock_user(VERIFY_WRITE, frame_addr,
                      sizeof(*frame) + extctx.size, 0);
    if (!frame) {
        force_sigsegv(sig);
        return;
    }
    extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
    extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    target_save_altstack(&frame->rs_uc.tuc_stack, env);

    setup_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    env->gpr[4] = sig;
    env->gpr[5] = frame_addr + offsetof(struct target_rt_sigframe, rs_info);
    env->gpr[6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc);
    env->gpr[3] = frame_addr;
    env->gpr[1] = default_rt_sigreturn;

    env->pc = ka->_sa_handler;
    unlock_user(frame, frame_addr, sizeof(*frame) + extctx.size);
}

long do_rt_sigreturn(CPULoongArchState *env)
{
    struct target_rt_sigframe *frame;
    struct extctx_layout extctx;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->gpr[3];
    trace_user_do_rt_sigreturn(env, frame_addr);

    if (!parse_extcontext(&extctx, frame_addr + sizeof(*frame))) {
        goto badframe;
    }

    frame = lock_user(VERIFY_READ, frame_addr,
                      sizeof(*frame) + extctx.size, 1);
    if (!frame) {
        goto badframe;
    }
    if (extctx.fpu.gaddr) {
        extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);

    target_restore_altstack(&frame->rs_uc.tuc_stack, env);

    unlock_user(frame, frame_addr, 0);
    return -QEMU_ESIGRETURN;

 badframe:
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}

void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
    assert(tramp != NULL);

    __put_user(0x03822c0b, tramp + 0);  /* ori     a7, zero, 0x8b */
    __put_user(0x002b0000, tramp + 1);  /* syscall 0 */

    default_rt_sigreturn = sigtramp_page;
    unlock_user(tramp, sigtramp_page, 8);
}