/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch emulation of Linux signals
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"

#include "target/loongarch/internals.h"

/* FP context was used */
#define SC_USED_FP (1 << 0)

struct target_sigcontext {
    uint64_t sc_pc;
    uint64_t sc_regs[32];
    uint32_t sc_flags;
    uint64_t sc_extcontext[0] QEMU_ALIGNED(16);
};

#define FPU_CTX_MAGIC 0x46505501
#define FPU_CTX_ALIGN 8
struct target_fpu_context {
    uint64_t regs[32];
    uint64_t fcc;
    uint32_t fcsr;
} QEMU_ALIGNED(FPU_CTX_ALIGN);

#define CONTEXT_INFO_ALIGN 16
struct target_sctx_info {
    uint32_t magic;
    uint32_t size;
    uint64_t padding;
} QEMU_ALIGNED(CONTEXT_INFO_ALIGN);

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ptr tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
    struct target_sigcontext tuc_mcontext;
};

struct target_rt_sigframe {
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};

/*
 * These two structures are not present in guest memory, are private
 * to the signal implementation, but are largely copied from the
 * kernel's signal implementation.
 */
struct ctx_layout {
    void *haddr;
    abi_ptr gaddr;
    unsigned int size;
};

struct extctx_layout {
    unsigned int size;
    unsigned int flags;
    struct ctx_layout fpu;
    struct ctx_layout end;
};

/* The kernel's sc_save_fcc macro is a sequence of MOVCF2GR+BSTRINS. */
static uint64_t read_all_fcc(CPULoongArchState *env)
{
    uint64_t ret = 0;

    for (int i = 0; i < 8; ++i) {
        ret |= (uint64_t)env->cf[i] << (i * 8);
    }

    return ret;
}

/* The kernel's sc_restore_fcc macro is a sequence of BSTRPICK+MOVGR2CF. */
static void write_all_fcc(CPULoongArchState *env, uint64_t val)
{
    for (int i = 0; i < 8; ++i) {
        env->cf[i] = (val >> (i * 8)) & 1;
    }
}

static abi_ptr extframe_alloc(struct extctx_layout *extctx,
                              struct ctx_layout *sctx, unsigned size,
                              unsigned align, abi_ptr orig_sp)
{
    abi_ptr sp = orig_sp;

    sp -= sizeof(struct target_sctx_info) + size;
    align = MAX(align, CONTEXT_INFO_ALIGN);
    sp = ROUND_DOWN(sp, align);
    sctx->gaddr = sp;

    size = orig_sp - sp;
    sctx->size = size;
    extctx->size += size;

    return sp;
}

static abi_ptr setup_extcontext(struct extctx_layout *extctx, abi_ptr sp)
{
    memset(extctx, 0, sizeof(struct extctx_layout));

    /* Grow down, alloc "end" context info first. */
    sp = extframe_alloc(extctx, &extctx->end, 0, CONTEXT_INFO_ALIGN, sp);

    /* For qemu, there is no lazy fp context switch, so fp always present. */
    extctx->flags = SC_USED_FP;
    sp = extframe_alloc(extctx, &extctx->fpu,
                        sizeof(struct target_fpu_context), FPU_CTX_ALIGN, sp);

    return sp;
}

static void setup_sigframe(CPULoongArchState *env,
                           struct target_sigcontext *sc,
                           struct extctx_layout *extctx)
{
    struct target_sctx_info *info;
    struct target_fpu_context *fpu_ctx;
    int i;

    __put_user(extctx->flags, &sc->sc_flags);
    __put_user(env->pc, &sc->sc_pc);
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->gpr[i], &sc->sc_regs[i]);
    }

    /*
     * Set fpu context
     */
    info = extctx->fpu.haddr;
    __put_user(FPU_CTX_MAGIC, &info->magic);
    __put_user(extctx->fpu.size, &info->size);

    fpu_ctx = (struct target_fpu_context *)(info + 1);
    for (i = 0; i < 32; ++i) {
        __put_user(env->fpr[i], &fpu_ctx->regs[i]);
    }
    __put_user(read_all_fcc(env), &fpu_ctx->fcc);
    __put_user(env->fcsr0, &fpu_ctx->fcsr);

    /*
     * Set end context
     */
    info = extctx->end.haddr;
    __put_user(0, &info->magic);
    __put_user(extctx->end.size, &info->size);
}

/*
 * Walk the extension context records that follow the rt_sigframe on the
 * guest stack until the END record is found.
 */
static bool parse_extcontext(struct extctx_layout *extctx, abi_ptr frame)
{
    memset(extctx, 0, sizeof(*extctx));

    while (1) {
        uint32_t magic, size;

        if (get_user_u32(magic, frame) || get_user_u32(size, frame + 4)) {
            return false;
        }

        switch (magic) {
        case 0: /* END */
            extctx->end.gaddr = frame;
            extctx->end.size = size;
            extctx->size += size;
            return true;

        case FPU_CTX_MAGIC:
            if (size < (sizeof(struct target_sctx_info) +
                        sizeof(struct target_fpu_context))) {
                return false;
            }
            extctx->fpu.gaddr = frame;
            extctx->fpu.size = size;
            extctx->size += size;
            break;

        default:
            return false;
        }

        frame += size;
    }
}

static void restore_sigframe(CPULoongArchState *env,
                             struct target_sigcontext *sc,
                             struct extctx_layout *extctx)
{
    int i;

    __get_user(env->pc, &sc->sc_pc);
    for (i = 1; i < 32; ++i) {
        __get_user(env->gpr[i], &sc->sc_regs[i]);
    }

    if (extctx->fpu.haddr) {
        struct target_fpu_context *fpu_ctx =
            extctx->fpu.haddr + sizeof(struct target_sctx_info);
        uint64_t fcc;

        for (i = 0; i < 32; ++i) {
            __get_user(env->fpr[i], &fpu_ctx->regs[i]);
        }
        __get_user(fcc, &fpu_ctx->fcc);
        write_all_fcc(env, fcc);
        __get_user(env->fcsr0, &fpu_ctx->fcsr);
        restore_fp_status(env);
    }
}

/*
 * Determine which stack to use.
 */
static abi_ptr get_sigframe(struct target_sigaction *ka,
                            CPULoongArchState *env,
                            struct extctx_layout *extctx)
{
    abi_ulong sp;

    sp = target_sigsp(get_sp_from_cpustate(env), ka);
    sp = ROUND_DOWN(sp, 16);
    sp = setup_extcontext(extctx, sp);
    sp -= sizeof(struct target_rt_sigframe);

    assert(QEMU_IS_ALIGNED(sp, 16));

    return sp;
}

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPULoongArchState *env)
{
    struct target_rt_sigframe *frame;
    struct extctx_layout extctx;
    abi_ptr frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, &extctx);
    trace_user_setup_rt_frame(env, frame_addr);

    frame = lock_user(VERIFY_WRITE, frame_addr,
                      sizeof(*frame) + extctx.size, 0);
    if (!frame) {
        force_sigsegv(sig);
        return;
    }
    extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
    extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    target_save_altstack(&frame->rs_uc.tuc_stack, env);

    setup_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    env->gpr[4] = sig;
    env->gpr[5] = frame_addr + offsetof(struct target_rt_sigframe, rs_info);
    env->gpr[6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc);
    env->gpr[3] = frame_addr;
    env->gpr[1] = default_rt_sigreturn;

    env->pc = ka->_sa_handler;
    unlock_user(frame, frame_addr, sizeof(*frame) + extctx.size);
}

long do_rt_sigreturn(CPULoongArchState *env)
{
    struct target_rt_sigframe *frame;
    struct extctx_layout extctx;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->gpr[3];
    trace_user_do_rt_sigreturn(env, frame_addr);

    if (!parse_extcontext(&extctx, frame_addr + sizeof(*frame))) {
        goto badframe;
    }

    frame = lock_user(VERIFY_READ, frame_addr,
                      sizeof(*frame) + extctx.size, 1);
    if (!frame) {
        goto badframe;
    }
    if (extctx.fpu.gaddr) {
        extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);

    target_restore_altstack(&frame->rs_uc.tuc_stack, env);

    unlock_user(frame, frame_addr, 0);
    return -QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}

void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
    assert(tramp != NULL);

    __put_user(0x03822c0b, tramp + 0); /* ori a7, zero, 0x8b */
    __put_user(0x002b0000, tramp + 1); /* syscall 0 */

    default_rt_sigreturn = sigtramp_page;
    unlock_user(tramp, sigtramp_page, 8);
}