xref: /openbmc/qemu/util/cpuinfo-riscv.c (revision d481cec7)
/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * Host specific cpu identification for RISC-V.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "host/cpuinfo.h"

#ifdef CONFIG_ASM_HWPROBE_H
#include <asm/hwprobe.h>
#include <sys/syscall.h>
#include <asm/unistd.h>
#endif

unsigned cpuinfo;
unsigned riscv_lg2_vlenb;
static volatile sig_atomic_t got_sigill;

static void sigill_handler(int signo, siginfo_t *si, void *data)
{
    /* Skip the faulting instruction. */
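    /*
     * All of the guarded probe instructions below use 4-byte
     * (uncompressed) encodings, so a fixed advance of 4 is enough.
     */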
    ucontext_t *uc = (ucontext_t *)data;

#ifdef __linux__
    uc->uc_mcontext.__gregs[REG_PC] += 4;
#elif defined(__OpenBSD__)
    uc->sc_sepc += 4;
#else
# error Unsupported OS
#endif

    got_sigill = 1;
}

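/*
 * Detection strategy, as implemented below: honour compile-time ISA
 * defines first, then query the kernel via the riscv_hwprobe syscall
 * where available, and finally probe by executing candidate
 * instructions under a temporary SIGILL handler.
 */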
/* Called both as a constructor and (possibly) via other constructors. */
unsigned __attribute__((constructor)) cpuinfo_init(void)
{
    unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZICOND | CPUINFO_ZVE64X;
    unsigned info = cpuinfo;

    if (info) {
        return info;
    }

    /* Test for compile-time settings. */
#if defined(__riscv_arch_test) && defined(__riscv_zba)
    info |= CPUINFO_ZBA;
#endif
#if defined(__riscv_arch_test) && defined(__riscv_zbb)
    info |= CPUINFO_ZBB;
#endif
#if defined(__riscv_arch_test) && defined(__riscv_zicond)
    info |= CPUINFO_ZICOND;
#endif
#if defined(__riscv_arch_test) && \
    (defined(__riscv_vector) || defined(__riscv_zve64x))
    info |= CPUINFO_ZVE64X;
#endif
    left &= ~info;
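    /* 'left' now holds only the extensions still to be probed at runtime. */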

#ifdef CONFIG_ASM_HWPROBE_H
    if (left) {
        /*
         * TODO: glibc 2.40 will introduce <sys/hwprobe.h>, which
         * provides __riscv_hwprobe and __riscv_hwprobe_one,
         * a slightly cleaner interface.
         */
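        /*
         * Hypothetical sketch only: with that header, the raw syscall
         * below could presumably be replaced by something like
         *     __riscv_hwprobe(&pair, 1, 0, NULL, 0)
         * mirroring the kernel's argument order; not verified here.
         */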
        struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
        if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0
            && pair.key >= 0) {
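            /*
             * The kernel clears .key to -1 for keys it does not
             * recognize, so a non-negative key means the value bits
             * tested below are meaningful.
             */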
            info |= pair.value & RISCV_HWPROBE_EXT_ZBA ? CPUINFO_ZBA : 0;
            info |= pair.value & RISCV_HWPROBE_EXT_ZBB ? CPUINFO_ZBB : 0;
            left &= ~(CPUINFO_ZBA | CPUINFO_ZBB);
#ifdef RISCV_HWPROBE_EXT_ZICOND
            info |= pair.value & RISCV_HWPROBE_EXT_ZICOND ? CPUINFO_ZICOND : 0;
            left &= ~CPUINFO_ZICOND;
#endif
            /* For rv64, V is Zve64d, a superset of Zve64x. */
            info |= pair.value & RISCV_HWPROBE_IMA_V ? CPUINFO_ZVE64X : 0;
#ifdef RISCV_HWPROBE_EXT_ZVE64X
            info |= pair.value & RISCV_HWPROBE_EXT_ZVE64X ? CPUINFO_ZVE64X : 0;
#endif
        }
    }
#endif /* CONFIG_ASM_HWPROBE_H */

    /*
     * We only detect support for vectors with hwprobe.  All kernels with
     * support for vectors in userspace also support the hwprobe syscall.
     */
    left &= ~CPUINFO_ZVE64X;

    if (left) {
        struct sigaction sa_old, sa_new;

        memset(&sa_new, 0, sizeof(sa_new));
        sa_new.sa_flags = SA_SIGINFO;
        sa_new.sa_sigaction = sigill_handler;
        sigaction(SIGILL, &sa_new, &sa_old);

        if (left & CPUINFO_ZBA) {
            /* Probe for Zba: add.uw zero,zero,zero. */
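            /*
             * The .insn r operands are opcode, funct3, funct7, rd, rs1,
             * rs2; 0x3b/0/0x04 is the add.uw encoding.  The Zbb and
             * Zicond probes below follow the same scheme.
             */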
            got_sigill = 0;
            asm volatile(".insn r 0x3b, 0, 0x04, zero, zero, zero"
                         : : : "memory");
            info |= got_sigill ? 0 : CPUINFO_ZBA;
            left &= ~CPUINFO_ZBA;
        }

        if (left & CPUINFO_ZBB) {
            /* Probe for Zbb: andn zero,zero,zero. */
            got_sigill = 0;
            asm volatile(".insn r 0x33, 7, 0x20, zero, zero, zero"
                         : : : "memory");
            info |= got_sigill ? 0 : CPUINFO_ZBB;
            left &= ~CPUINFO_ZBB;
        }

        if (left & CPUINFO_ZICOND) {
            /* Probe for Zicond: czero.eqz zero,zero,zero. */
            got_sigill = 0;
            asm volatile(".insn r 0x33, 5, 0x07, zero, zero, zero"
                         : : : "memory");
            info |= got_sigill ? 0 : CPUINFO_ZICOND;
            left &= ~CPUINFO_ZICOND;
        }

        sigaction(SIGILL, &sa_old, NULL);
        assert(left == 0);
    }

    if (info & CPUINFO_ZVE64X) {
        /*
         * We are guaranteed by RVV-1.0 that VLEN is a power of 2.
         * We are guaranteed by Zve64x that VLEN >= 64, and that
         * EEW of {8,16,32,64} are supported.
         */
        unsigned long vlenb;
        /* csrr %0, vlenb */
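        /*
         * The .insn i operands are opcode, funct3, rd, rs1, simm12:
         * 0x73 is SYSTEM, funct3 2 is CSRRS, and -990 is the
         * sign-extended form of CSR address 0xc22 (vlenb); csrrs with
         * rs1 = zero is the canonical expansion of csrr.
         */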
        asm volatile(".insn i 0x73, 0x2, %0, zero, -990" : "=r"(vlenb));
        assert(vlenb >= 8);
        assert(is_power_of_2(vlenb));
        /* Cache VLEN in a convenient form. */
        riscv_lg2_vlenb = ctz32(vlenb);
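        /* e.g. VLEN = 128 bits gives vlenb = 16 and riscv_lg2_vlenb = 4. */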
    }

    info |= CPUINFO_ALWAYS;
    cpuinfo = info;
    return info;
}