xref: /openbmc/qemu/tests/tcg/multiarch/sigbus.c (revision b1f4b9b8)
#define _GNU_SOURCE 1

#include <assert.h>
#include <stdlib.h>
#include <signal.h>
#include <endian.h>


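/* A 64-bit test pattern and a pointer one byte into it, so p is misaligned. */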
unsigned long long x = 0x8877665544332211ull;
void * volatile p = (void *)&x + 1;

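/*
 * If the access in main() raises SIGBUS, this handler verifies the
 * reported signal, code and faulting address, then exits successfully.
 */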
void sigbus(int sig, siginfo_t *info, void *uc)
{
    assert(sig == SIGBUS);
    assert(info->si_signo == SIGBUS);
#ifdef BUS_ADRALN
    assert(info->si_code == BUS_ADRALN);
#endif
    assert(info->si_addr == p);
    exit(EXIT_SUCCESS);
}

int main()
{
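    /* Install the SIGBUS handler with SA_SIGINFO so it receives siginfo_t. */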
    struct sigaction sa = {
        .sa_sigaction = sigbus,
        .sa_flags = SA_SIGINFO
    };
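    /* Set when it is acceptable for the access below not to raise SIGBUS. */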
    int allow_fail = 0;
    int tmp;

    tmp = sigaction(SIGBUS, &sa, NULL);
    assert(tmp == 0);

    /*
     * Select an operation that's likely to enforce alignment.
     * On many guests that support unaligned accesses by default,
     * this is often an atomic operation.
     */
#if defined(__aarch64__)
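    /* Load-exclusive register; ldxr faults on a misaligned address. */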
    asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__alpha__)
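    /* Load longword locked. */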
    asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__arm__)
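    /* Load-exclusive; ldrex requires an aligned address. */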
    asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__powerpc__)
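    /* Load word and reserve indexed. */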
    asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__riscv_atomic)
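    /* Load-reserved word from the RISC-V A extension. */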
    asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
#else
    /* No insn known to fault unaligned -- try for a straight load. */
    allow_fail = 1;
    tmp = *(volatile int *)p;
#endif

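    /*
     * Reaching this point means the access above did not raise SIGBUS,
     * which is only acceptable on the plain-load fallback path.
     */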
    assert(allow_fail);

    /*
     * The unaligned load succeeded, so check that it read the expected
     * bytes of x: 22 33 44 55 on little-endian, 77 66 55 44 on big-endian.
     */
    if (BYTE_ORDER == LITTLE_ENDIAN) {
        assert(tmp == 0x55443322);
    } else {
        assert(tmp == 0x77665544);
    }
    return EXIT_SUCCESS;
}