/* tests/tcg/multiarch/sigbus.c */
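/*
 * Exercises SIGBUS delivery for misaligned accesses under qemu-user;
 * part of QEMU's multiarch TCG regression tests.
 */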
#define _GNU_SOURCE 1

#include <assert.h>
#include <stdlib.h>
#include <signal.h>
#include <endian.h>

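/*
 * Test data: a 64-bit pattern and a pointer one byte into it, so any
 * access wider than a byte through p is misaligned.  The pointer is
 * volatile so the compiler cannot prove the misalignment away or
 * elide the load.
 */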
unsigned long long x = 0x8877665544332211ull;
void * volatile p = (void *)&x + 1;

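/*
 * SIGBUS handler: check that the kernel reported the fault we expect,
 * then exit with success -- reaching the handler is the test passing.
 */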
void sigbus(int sig, siginfo_t *info, void *uc)
{
    assert(sig == SIGBUS);
    assert(info->si_signo == SIGBUS);
#ifdef BUS_ADRALN
    /* Alignment faults should be reported as BUS_ADRALN. */
    assert(info->si_code == BUS_ADRALN);
#endif
    assert(info->si_addr == p);
    exit(EXIT_SUCCESS);
}

int main(void)
{
    struct sigaction sa = {
        .sa_sigaction = sigbus,
        .sa_flags = SA_SIGINFO
    };
    int allow_fail = 0;
    int tmp;

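    /* Install the handler before touching the misaligned address. */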
    tmp = sigaction(SIGBUS, &sa, NULL);
    assert(tmp == 0);

    /*
     * Select an operation that's likely to enforce alignment.
     * On many guests that support unaligned accesses by default,
     * this is often an atomic operation.
     */
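    /*
     * Each branch below uses a load-exclusive / load-reserved insn,
     * which architecturally requires a naturally aligned address.
     */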
#if defined(__aarch64__)
    asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__alpha__)
    asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__arm__)
    asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__powerpc__)
    asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__riscv_atomic)
    asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
#else
    /* No insn known to fault unaligned -- try for a straight load. */
    allow_fail = 1;
    tmp = *(volatile int *)p;
#endif

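    /*
     * If one of the exclusive-load branches was compiled in, it must
     * have raised SIGBUS and exited via the handler; falling through
     * to here is only acceptable on the plain-load fallback path.
     */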
    assert(allow_fail);

    /*
     * We didn't see a signal.
     * We might as well validate the unaligned load worked.
     */
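    /*
     * x = 0x8877665544332211.  Little-endian memory layout is
     * 11 22 33 44 55 66 77 88, so a 4-byte load at &x + 1 picks up
     * 22 33 44 55 == 0x55443322.  Big-endian layout is
     * 88 77 66 55 44 33 22 11, so the same load yields 0x77665544.
     */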
    if (BYTE_ORDER == LITTLE_ENDIAN) {
        assert(tmp == 0x55443322);
    } else {
        assert(tmp == 0x77665544);
    }
    return EXIT_SUCCESS;
}
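
/*
 * A minimal standalone sketch of how this might be exercised (the
 * toolchain triple and qemu binary name here are assumptions; in-tree
 * it is normally run via QEMU's `make check-tcg` harness):
 *
 *   aarch64-linux-gnu-gcc -static -o sigbus sigbus.c
 *   qemu-aarch64 ./sigbus && echo PASS
 */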