/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
#include "target/arm/cpu-features.h"

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};
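
/*
 * Readers walk this list of records by advancing a pointer by head.size
 * until they reach the zero terminator; see target_restore_sigframe().
 */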

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

#define TARGET_EXTRA_MAGIC 0x45585401

struct target_extra_context {
    struct target_aarch64_ctx head;
    uint64_t datap; /* 16-byte aligned pointer to extra space cast to __u64 */
    uint32_t size; /* size in bytes of the extra space */
    uint32_t reserved[3];
};

#define TARGET_SVE_MAGIC 0x53564501

struct target_sve_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t flags;
    uint16_t reserved[2];
    /* The actual SVE data immediately follows. It is laid out
     * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
     * the original struct pointer.
     */
};

#define TARGET_SVE_VQ_BYTES 16

#define TARGET_SVE_SIG_ZREG_SIZE(VQ) ((VQ) * TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_PREG_SIZE(VQ) ((VQ) * (TARGET_SVE_VQ_BYTES / 8))

#define TARGET_SVE_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
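
/*
 * For example, with VQ == 4 (512-bit vectors): the header occupies the
 * first 16 bytes, each Z register takes 64 bytes (2048 bytes for all 32),
 * each predicate register takes 8 bytes, the FFR follows the 16 predicate
 * registers, and TARGET_SVE_SIG_CONTEXT_SIZE(4) comes to 2200 bytes.
 */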

#define TARGET_SVE_SIG_FLAG_SM 1

#define TARGET_ZA_MAGIC 0x54366345

struct target_za_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t reserved[3];
    /* The actual ZA data immediately follows. */
};

#define TARGET_ZA_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
    (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
    TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
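
/*
 * ZA is a VL-by-VL byte array, stored as VL horizontal vectors of VL bytes
 * each, so the payload after the 16-byte-aligned header is VL * VL bytes.
 */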

#define TARGET_TPIDR2_MAGIC 0x54504902

struct target_tpidr2_context {
    struct target_aarch64_ctx head;
    uint64_t tpidr2;
};

#define TARGET_ZT_MAGIC 0x5a544e01

struct target_zt_context {
    struct target_aarch64_ctx head;
    uint16_t nregs;
    uint16_t reserved[3];
    /* ZTn register data immediately follows */
};

#define TARGET_ZT_SIG_REG_BYTES (512 / 8)
#define TARGET_ZT_SIG_REGS_SIZE(n) (TARGET_ZT_SIG_REG_BYTES * (n))
#define TARGET_ZT_SIG_CONTEXT_SIZE(n) (sizeof(struct target_zt_context) + \
                                       TARGET_ZT_SIG_REGS_SIZE(n))
#define TARGET_ZT_SIG_REGS_OFFSET sizeof(struct target_zt_context)
QEMU_BUILD_BUG_ON(TARGET_ZT_SIG_REG_BYTES != \
                  sizeof_field(CPUARMState, za_state.zt0));

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};

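/*
 * A conventional AAPCS64 frame record: {fp, lr} as saved at the point the
 * signal was raised. x29 is pointed at this pair so that unwinders can
 * step from the signal handler back into the interrupted code.
 */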
struct target_rt_frame_record {
    uint64_t fp;
    uint64_t lr;
};

static void target_setup_general_frame(struct target_rt_sigframe *sf,
                                       CPUARMState *env, target_sigset_t *set)
{
    int i;

    __put_user(0, &sf->uc.tuc_flags);
    __put_user(0, &sf->uc.tuc_link);

    target_save_altstack(&sf->uc.tuc_stack, env);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }
}

static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
                                       CPUARMState *env)
{
    int i;

    __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic);
    __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size);
    __put_user(vfp_get_fpsr(env), &fpsimd->fpsr);
    __put_user(vfp_get_fpcr(env), &fpsimd->fpcr);

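    /*
     * Each 128-bit Q register is stored as two 64-bit halves; on a
     * big-endian target the high half is written first so the pair reads
     * back as one 128-bit value in target byte order.
     */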
    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if TARGET_BIG_ENDIAN
        __put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __put_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __put_user(q[0], &fpsimd->vregs[i * 2]);
        __put_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}

static void target_setup_extra_record(struct target_extra_context *extra,
                                      uint64_t datap, uint32_t extra_size)
{
    __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic);
    __put_user(sizeof(struct target_extra_context), &extra->head.size);
    __put_user(datap, &extra->datap);
    __put_user(extra_size, &extra->size);
}

static void target_setup_end_record(struct target_aarch64_ctx *end)
{
    __put_user(0, &end->magic);
    __put_user(0, &end->size);
}

static void target_setup_sve_record(struct target_sve_context *sve,
                                    CPUARMState *env, int size)
{
    int i, j, vq = sve_vq(env);

    memset(sve, 0, sizeof(*sve));
    __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
    __put_user(size, &sve->head.size);
    __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
    if (FIELD_EX64(env->svcr, SVCR, SM)) {
        __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags);
    }

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address. This corresponds to a little-endian store
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
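    /*
     * Predicate registers hold one bit per byte of the vector. QEMU packs
     * them into 64-bit words, each covering four quadwords of Z, so the
     * loop emits one 16-bit chunk per quadword. Index 16 is the FFR, which
     * the layout places directly after the 16 predicate registers.
     */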
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint64_t r = env->vfp.pregs[i].p[j >> 2];
            __put_user_e(r >> ((j & 3) * 16), p + j, le);
        }
    }
}

static void target_setup_za_record(struct target_za_context *za,
                                   CPUARMState *env, int size)
{
    int vq = sme_vq(env);
    int vl = vq * TARGET_SVE_VQ_BYTES;
    int i, j;

    memset(za, 0, sizeof(*za));
    __put_user(TARGET_ZA_MAGIC, &za->head.magic);
    __put_user(size, &za->head.size);
    __put_user(vl, &za->vl);

    if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
        return;
    }
    assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));

    /*
     * Note that ZA vectors are stored as a byte stream,
     * with each byte element at a subsequent address.
     */
    for (i = 0; i < vl; ++i) {
        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->za_state.za[i].d[j], z + j, le);
        }
    }
}

static void target_setup_tpidr2_record(struct target_tpidr2_context *tpidr2,
                                       CPUARMState *env)
{
    __put_user(TARGET_TPIDR2_MAGIC, &tpidr2->head.magic);
    __put_user(sizeof(struct target_tpidr2_context), &tpidr2->head.size);
    __put_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2);
}

static void target_setup_zt_record(struct target_zt_context *zt,
                                   CPUARMState *env, int size)
{
    uint64_t *z;

    memset(zt, 0, sizeof(*zt));
    __put_user(TARGET_ZT_MAGIC, &zt->head.magic);
    __put_user(size, &zt->head.size);
    /*
     * The record format allows for multiple ZT regs, but
     * currently there is only one, ZT0.
     */
    __put_user(1, &zt->nregs);
    assert(size == TARGET_ZT_SIG_CONTEXT_SIZE(1));

    /* ZT0 is the same byte-stream format as SVE regs and ZA */
    z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET;
    for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) {
        __put_user_e(env->za_state.zt0[i], z + i, le);
    }
}

static void target_restore_general_frame(CPUARMState *env,
                                         struct target_rt_sigframe *sf)
{
    sigset_t set;
    uint64_t pstate;
    int i;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);
}

static void target_restore_fpsimd_record(CPUARMState *env,
                                         struct target_fpsimd_context *fpsimd)
{
    uint32_t fpsr, fpcr;
    int i;

    __get_user(fpsr, &fpsimd->fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &fpsimd->fpcr);
    vfp_set_fpcr(env, fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if TARGET_BIG_ENDIAN
        __get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __get_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __get_user(q[0], &fpsimd->vregs[i * 2]);
        __get_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}

static bool target_restore_sve_record(CPUARMState *env,
                                      struct target_sve_context *sve,
                                      int size, int *svcr)
{
    int i, j, vl, vq, flags;
    bool sm;

    __get_user(vl, &sve->vl);
    __get_user(flags, &sve->flags);

    sm = flags & TARGET_SVE_SIG_FLAG_SM;

    /* The cpu must support Streaming or Non-streaming SVE. */
    if (sm
        ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
        : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        return false;
    }

    /*
     * Note that we cannot use sve_vq() because that depends on the
     * current setting of PSTATE.SM, not the state to be restored.
     */
    vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;

    /* Reject mismatched VL. */
    if (vl != vq * TARGET_SVE_VQ_BYTES) {
        return false;
    }

    /* Accept empty record -- used to clear PSTATE.SM. */
    if (size <= sizeof(*sve)) {
        return true;
    }

    /* Reject non-empty but incomplete record. */
    if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
        return false;
    }

    *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);

    /*
     * Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address. This corresponds to a little-endian load
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
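    /*
     * Rebuild each packed 64-bit predicate word from its 16-bit
     * per-quadword chunks; index 16 is the FFR.
     */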
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint16_t r;
            __get_user_e(r, p + j, le);
            if (j & 3) {
                env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
            } else {
                env->vfp.pregs[i].p[j >> 2] = r;
            }
        }
    }
    return true;
}

static bool target_restore_za_record(CPUARMState *env,
                                     struct target_za_context *za,
                                     int size, int *svcr)
{
    int i, j, vl, vq;

    if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        return false;
    }

    __get_user(vl, &za->vl);
    vq = sme_vq(env);

    /* Reject mismatched VL. */
    if (vl != vq * TARGET_SVE_VQ_BYTES) {
        return false;
    }

    /* Accept empty record -- used to clear PSTATE.ZA. */
    if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
        return true;
    }

    /* Reject non-empty but incomplete record. */
    if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
        return false;
    }

    *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);

    for (i = 0; i < vl; ++i) {
        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->za_state.za[i].d[j], z + j, le);
        }
    }
    return true;
}

static void target_restore_tpidr2_record(CPUARMState *env,
                                         struct target_tpidr2_context *tpidr2)
{
    __get_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2);
}

static bool target_restore_zt_record(CPUARMState *env,
                                     struct target_zt_context *zt, int size,
                                     int svcr)
{
    uint16_t nregs;
    uint64_t *z;

    if (!(FIELD_EX64(svcr, SVCR, ZA))) {
        return false;
    }

    __get_user(nregs, &zt->nregs);

    if (nregs != 1) {
        return false;
    }

    z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET;
    for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) {
        __get_user_e(env->za_state.zt0[i], z + i, le);
    }
    return true;
}

static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    struct target_aarch64_ctx *ctx, *extra = NULL;
    struct target_fpsimd_context *fpsimd = NULL;
    struct target_sve_context *sve = NULL;
    struct target_za_context *za = NULL;
    struct target_tpidr2_context *tpidr2 = NULL;
    struct target_zt_context *zt = NULL;
    uint64_t extra_datap = 0;
    bool used_extra = false;
    int sve_size = 0;
    int za_size = 0;
    int zt_size = 0;
    int svcr = 0;

    target_restore_general_frame(env, sf);

    ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
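    /*
     * Walk the records in the reserved area (and, at most once, in the
     * extra space it points to), collecting one pointer per record type
     * and rejecting duplicates or malformed sizes.
     */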
    while (ctx) {
        uint32_t magic, size, extra_size;

        __get_user(magic, &ctx->magic);
        __get_user(size, &ctx->size);
        switch (magic) {
        case 0:
            if (size != 0) {
                goto err;
            }
            if (used_extra) {
                ctx = NULL;
            } else {
                ctx = extra;
                used_extra = true;
            }
            continue;

        case TARGET_FPSIMD_MAGIC:
            if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
                goto err;
            }
            fpsimd = (struct target_fpsimd_context *)ctx;
            break;

        case TARGET_SVE_MAGIC:
            if (sve || size < sizeof(struct target_sve_context)) {
                goto err;
            }
            sve = (struct target_sve_context *)ctx;
            sve_size = size;
            break;

        case TARGET_ZA_MAGIC:
            if (za || size < sizeof(struct target_za_context)) {
                goto err;
            }
            za = (struct target_za_context *)ctx;
            za_size = size;
            break;

        case TARGET_TPIDR2_MAGIC:
            if (tpidr2 || size != sizeof(struct target_tpidr2_context) ||
                !cpu_isar_feature(aa64_sme, env_archcpu(env))) {
                goto err;
            }
            tpidr2 = (struct target_tpidr2_context *)ctx;
            break;

        case TARGET_ZT_MAGIC:
            if (zt || size != TARGET_ZT_SIG_CONTEXT_SIZE(1) ||
                !cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
                goto err;
            }
            zt = (struct target_zt_context *)ctx;
            zt_size = size;
            break;

        case TARGET_EXTRA_MAGIC:
            if (extra || size != sizeof(struct target_extra_context)) {
                goto err;
            }
            __get_user(extra_datap,
                       &((struct target_extra_context *)ctx)->datap);
            __get_user(extra_size,
                       &((struct target_extra_context *)ctx)->size);
            extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
            if (!extra) {
                return 1;
            }
            break;

        default:
            /* Unknown record -- we certainly didn't generate it.
             * Did we in fact get out of sync?
             */
            goto err;
        }
        ctx = (void *)ctx + size;
    }

    /* Require FPSIMD always. */
    if (fpsimd) {
        target_restore_fpsimd_record(env, fpsimd);
    } else {
        goto err;
    }

    /* SVE data, if present, overwrites FPSIMD data. */
    if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
        goto err;
    }
    if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
        goto err;
    }
    if (tpidr2) {
        target_restore_tpidr2_record(env, tpidr2);
    }
    /*
     * NB that we must restore ZT after ZA so the check that there's
     * no ZT record if SVCR.ZA is 0 gets the right value of SVCR.
     */
    if (zt && !target_restore_zt_record(env, zt, zt_size, svcr)) {
        goto err;
    }
    if (env->svcr != svcr) {
        env->svcr = svcr;
        arm_rebuild_hflags(env);
    }
    unlock_user(extra, extra_datap, 0);
    return 0;

 err:
    unlock_user(extra, extra_datap, 0);
    return 1;
}

static abi_ulong get_sigframe(struct target_sigaction *ka,
                              CPUARMState *env, int size)
{
    abi_ulong sp;

    sp = target_sigsp(get_sp_from_cpustate(env), ka);

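    /* The AArch64 ABI requires the stack pointer to stay 16-byte aligned. */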
    sp = (sp - size) & ~15;

    return sp;
}

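/*
 * Bookkeeping for laying out records in the signal frame. Offsets are
 * relative to the start of the frame; the extra_* fields describe the
 * overflow area reached via a target_extra_context record when the records
 * do not all fit in the standard __reserved space (e.g. with large SVE or
 * SME vector lengths).
 */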
typedef struct {
    int total_size;
    int extra_base;
    int extra_size;
    int std_end_ofs;
    int extra_ofs;
    int extra_end_ofs;
} target_sigframe_layout;

static int alloc_sigframe_space(int this_size, target_sigframe_layout *l)
{
    /* Make sure there will always be space for the end marker. */
    const int std_size = sizeof(struct target_rt_sigframe)
                         - sizeof(struct target_aarch64_ctx);
    int this_loc = l->total_size;

    if (l->extra_base) {
        /* Once we have begun an extra space, all allocations go there. */
        l->extra_size += this_size;
    } else if (this_size + this_loc > std_size) {
        /* This allocation does not fit in the standard space. */
        /* Allocate the extra record. */
        l->extra_ofs = this_loc;
        l->total_size += sizeof(struct target_extra_context);

        /* Allocate the standard end record. */
        l->std_end_ofs = l->total_size;
        l->total_size += sizeof(struct target_aarch64_ctx);

        /* Allocate the requested record. */
        l->extra_base = this_loc = l->total_size;
        l->extra_size = this_size;
    }
    l->total_size += this_size;

    return this_loc;
}

static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    target_sigframe_layout layout = {
        /* Begin with the size pointing to the reserved space. */
        .total_size = offsetof(struct target_rt_sigframe,
                               uc.tuc_mcontext.__reserved),
    };
    int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0, tpidr2_ofs = 0;
    int zt_ofs = 0;
    int sve_size = 0, za_size = 0, tpidr2_size = 0, zt_size = 0;
    struct target_rt_sigframe *frame;
    struct target_rt_frame_record *fr;
    abi_ulong frame_addr, return_addr;

    /* FPSIMD record is always in the standard space. */
    fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
                                      &layout);

    /* SVE state needs saving only if it exists. */
    if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
        cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
        sve_ofs = alloc_sigframe_space(sve_size, &layout);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        tpidr2_size = sizeof(struct target_tpidr2_context);
        tpidr2_ofs = alloc_sigframe_space(tpidr2_size, &layout);
        /* ZA state needs saving only if it is enabled. */
        if (FIELD_EX64(env->svcr, SVCR, ZA)) {
            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
        } else {
            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
        }
        za_ofs = alloc_sigframe_space(za_size, &layout);
    }
    if (cpu_isar_feature(aa64_sme2, env_archcpu(env)) &&
        FIELD_EX64(env->svcr, SVCR, ZA)) {
        /* If SME ZA storage is enabled, we must also save SME2 ZT0 */
        zt_size = TARGET_ZT_SIG_CONTEXT_SIZE(1);
        zt_ofs = alloc_sigframe_space(zt_size, &layout);
    }

    if (layout.extra_ofs) {
        /* Reserve space for the extra end marker. The standard end marker
         * will have been allocated when we allocated the extra record.
         */
        layout.extra_end_ofs
            = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout);
    } else {
        /* Reserve space for the standard end marker.
         * Do not use alloc_sigframe_space because we cheat
         * std_size therein to reserve space for this.
         */
        layout.std_end_ofs = layout.total_size;
        layout.total_size += sizeof(struct target_aarch64_ctx);
    }

    /* We must always provide at least the standard 4K reserved space,
     * even if we don't use all of it (this is part of the ABI)
     */
    layout.total_size = MAX(layout.total_size,
                            sizeof(struct target_rt_sigframe));

    /*
     * Reserve space for the standard frame unwind pair: fp, lr.
     * Despite the name this is not a "real" record within the frame.
     */
    fr_ofs = layout.total_size;
    layout.total_size += sizeof(struct target_rt_frame_record);

    frame_addr = get_sigframe(ka, env, layout.total_size);
    trace_user_setup_frame(env, frame_addr);
    frame = lock_user(VERIFY_WRITE, frame_addr, layout.total_size, 0);
    if (!frame) {
        goto give_sigsegv;
    }

    target_setup_general_frame(frame, env, set);
    target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
    target_setup_end_record((void *)frame + layout.std_end_ofs);
    if (layout.extra_ofs) {
        target_setup_extra_record((void *)frame + layout.extra_ofs,
                                  frame_addr + layout.extra_base,
                                  layout.extra_size);
        target_setup_end_record((void *)frame + layout.extra_end_ofs);
    }
    if (sve_ofs) {
        target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
    }
    if (za_ofs) {
        target_setup_za_record((void *)frame + za_ofs, env, za_size);
    }
    if (tpidr2_ofs) {
        target_setup_tpidr2_record((void *)frame + tpidr2_ofs, env);
    }
    if (zt_ofs) {
        target_setup_zt_record((void *)frame + zt_ofs, env, zt_size);
    }

    /* Set up the stack frame for unwinding. */
    fr = (void *)frame + fr_ofs;
    __put_user(env->xregs[29], &fr->fp);
    __put_user(env->xregs[30], &fr->lr);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        return_addr = default_rt_sigreturn;
    }
    env->xregs[0] = usig;
    env->xregs[29] = frame_addr + fr_ofs;
    env->xregs[30] = return_addr;
    env->xregs[31] = frame_addr;
    env->pc = ka->_sa_handler;

    /* Invoke the signal handler as if by indirect call. */
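    /*
     * BTYPE 2 is what a BLR to the handler would have set, so a handler
     * built for BTI must begin with a BTI C (or JC) landing pad.
     */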
    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        env->btype = 2;
    }

    /*
     * Invoke the signal handler with a clean SME state: both SM and ZA
     * disabled and TPIDR2_EL0 cleared.
     */
    aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
    env->cp15.tpidr2_el0 = 0;

    if (info) {
        frame->info = *info;
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user(frame, frame_addr, layout.total_size);
    return;

 give_sigsegv:
    unlock_user(frame, frame_addr, layout.total_size);
    force_sigsegv(usig);
}

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info, target_sigset_t *set,
                    CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    target_restore_altstack(&frame->uc.tuc_stack, env);

    unlock_user_struct(frame, frame_addr, 0);
    return -QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}

void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
    assert(tramp != NULL);

    /*
     * mov x8,#__NR_rt_sigreturn; svc #0
     * Since these are instructions they need to be put as little-endian
     * regardless of target default or current CPU endianness.
     */
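    /* 0xd2801168: movz x8, #139 (__NR_rt_sigreturn); 0xd4000001: svc #0 */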
    __put_user_e(0xd2801168, &tramp[0], le);
    __put_user_e(0xd4000001, &tramp[1], le);

    default_rt_sigreturn = sigtramp_page;
    unlock_user(tramp, sigtramp_page, 8);
}