1e98b1085SAnup Patel // SPDX-License-Identifier: GPL-2.0
2e98b1085SAnup Patel /*
3e98b1085SAnup Patel * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4e98b1085SAnup Patel * Copyright (C) 2023 Ventana Micro Systems Inc.
5e98b1085SAnup Patel *
6e98b1085SAnup Patel * Authors:
7e98b1085SAnup Patel * Anup Patel <apatel@ventanamicro.com>
8e98b1085SAnup Patel */
9e98b1085SAnup Patel
10e98b1085SAnup Patel #include <linux/bitops.h>
11e98b1085SAnup Patel #include <linux/errno.h>
12e98b1085SAnup Patel #include <linux/err.h>
13e98b1085SAnup Patel #include <linux/uaccess.h>
14e98b1085SAnup Patel #include <linux/kvm_host.h>
15e98b1085SAnup Patel #include <asm/cacheflush.h>
16e98b1085SAnup Patel #include <asm/hwcap.h>
17e98b1085SAnup Patel #include <asm/kvm_vcpu_vector.h>
18e98b1085SAnup Patel #include <asm/vector.h>
19e98b1085SAnup Patel
20e98b1085SAnup Patel #define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
21e98b1085SAnup Patel
22e98b1085SAnup Patel #define KVM_ISA_EXT_ARR(ext) \
23e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
24e98b1085SAnup Patel
25e98b1085SAnup Patel /* Mapping between KVM ISA Extension ID & Host ISA extension ID */
26e98b1085SAnup Patel static const unsigned long kvm_isa_ext_arr[] = {
27d2064d4aSAnup Patel /* Single letter extensions (alphabetically sorted) */
28e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
29e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
30e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
31e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
32e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
33e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
34e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
35e98b1085SAnup Patel [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
36d2064d4aSAnup Patel /* Multi letter extensions (alphabetically sorted) */
37e98b1085SAnup Patel KVM_ISA_EXT_ARR(SSAIA),
38e98b1085SAnup Patel KVM_ISA_EXT_ARR(SSTC),
39e98b1085SAnup Patel KVM_ISA_EXT_ARR(SVINVAL),
40e98b1085SAnup Patel KVM_ISA_EXT_ARR(SVNAPOT),
41e98b1085SAnup Patel KVM_ISA_EXT_ARR(SVPBMT),
4241716861SAnup Patel KVM_ISA_EXT_ARR(ZBA),
43e98b1085SAnup Patel KVM_ISA_EXT_ARR(ZBB),
4441716861SAnup Patel KVM_ISA_EXT_ARR(ZBS),
45d2064d4aSAnup Patel KVM_ISA_EXT_ARR(ZICBOM),
46d2064d4aSAnup Patel KVM_ISA_EXT_ARR(ZICBOZ),
47043cba06SAnup Patel KVM_ISA_EXT_ARR(ZICNTR),
48043cba06SAnup Patel KVM_ISA_EXT_ARR(ZICSR),
49043cba06SAnup Patel KVM_ISA_EXT_ARR(ZIFENCEI),
50e98b1085SAnup Patel KVM_ISA_EXT_ARR(ZIHINTPAUSE),
51043cba06SAnup Patel KVM_ISA_EXT_ARR(ZIHPM),
52e98b1085SAnup Patel };
53e98b1085SAnup Patel
kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)54e98b1085SAnup Patel static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
55e98b1085SAnup Patel {
56e98b1085SAnup Patel unsigned long i;
57e98b1085SAnup Patel
58e98b1085SAnup Patel for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
59e98b1085SAnup Patel if (kvm_isa_ext_arr[i] == base_ext)
60e98b1085SAnup Patel return i;
61e98b1085SAnup Patel }
62e98b1085SAnup Patel
63e98b1085SAnup Patel return KVM_RISCV_ISA_EXT_MAX;
64e98b1085SAnup Patel }
65e98b1085SAnup Patel
kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)66e98b1085SAnup Patel static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
67e98b1085SAnup Patel {
68e98b1085SAnup Patel switch (ext) {
69e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_H:
70e98b1085SAnup Patel return false;
71e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_V:
72e98b1085SAnup Patel return riscv_v_vstate_ctrl_user_allowed();
73e98b1085SAnup Patel default:
74e98b1085SAnup Patel break;
75e98b1085SAnup Patel }
76e98b1085SAnup Patel
77e98b1085SAnup Patel return true;
78e98b1085SAnup Patel }
79e98b1085SAnup Patel
kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)80e98b1085SAnup Patel static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
81e98b1085SAnup Patel {
82e98b1085SAnup Patel switch (ext) {
83e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_A:
84e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_C:
85e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_I:
86e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_M:
87e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_SSAIA:
88e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_SSTC:
89e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_SVINVAL:
90e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_SVNAPOT:
91d2064d4aSAnup Patel case KVM_RISCV_ISA_EXT_ZBA:
92d2064d4aSAnup Patel case KVM_RISCV_ISA_EXT_ZBB:
93d2064d4aSAnup Patel case KVM_RISCV_ISA_EXT_ZBS:
94043cba06SAnup Patel case KVM_RISCV_ISA_EXT_ZICNTR:
95043cba06SAnup Patel case KVM_RISCV_ISA_EXT_ZICSR:
96043cba06SAnup Patel case KVM_RISCV_ISA_EXT_ZIFENCEI:
97e98b1085SAnup Patel case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
98043cba06SAnup Patel case KVM_RISCV_ISA_EXT_ZIHPM:
99e98b1085SAnup Patel return false;
100e98b1085SAnup Patel default:
101e98b1085SAnup Patel break;
102e98b1085SAnup Patel }
103e98b1085SAnup Patel
104e98b1085SAnup Patel return true;
105e98b1085SAnup Patel }
106e98b1085SAnup Patel
kvm_riscv_vcpu_setup_isa(struct kvm_vcpu * vcpu)107e98b1085SAnup Patel void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
108e98b1085SAnup Patel {
109e98b1085SAnup Patel unsigned long host_isa, i;
110e98b1085SAnup Patel
111e98b1085SAnup Patel for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
112e98b1085SAnup Patel host_isa = kvm_isa_ext_arr[i];
113e98b1085SAnup Patel if (__riscv_isa_extension_available(NULL, host_isa) &&
114e98b1085SAnup Patel kvm_riscv_vcpu_isa_enable_allowed(i))
115e98b1085SAnup Patel set_bit(host_isa, vcpu->arch.isa);
116e98b1085SAnup Patel }
117e98b1085SAnup Patel }
118e98b1085SAnup Patel
/*
 * KVM_GET_ONE_REG handler for the CONFIG register space.
 *
 * Decodes the config register index from reg->id, reads the matching
 * vcpu/host value, and copies it out to userspace.
 *
 * Returns 0 on success, -EINVAL on a size mismatch, -ENOENT for an
 * unknown register or a block-size register whose extension is absent,
 * and -EFAULT if the copy to userspace fails.
 */
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch, size, and subspace bits to get the register index */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	/* Config registers are always exactly one unsigned long wide */
	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/* Only the single-letter (base) extension bits are exposed */
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		/* Block size only meaningful when Zicbom is enabled */
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		/* Block size only meaningful when Zicboz is enabled */
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		/* Report the host's SATP mode field value */
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
167e98b1085SAnup Patel
/*
 * KVM_SET_ONE_REG handler for the CONFIG register space.
 *
 * Writes to the ISA bitmap and machine ID registers are only permitted
 * before the vcpu has run (-EBUSY afterwards), except when the written
 * value equals the current value, which is treated as a no-op.  The
 * block-size and satp_mode registers are read-only in practice: writes
 * must match the host value or fail with -EINVAL.
 *
 * Returns 0 on success, -EINVAL/-ENOENT/-EBUSY as above, and -EFAULT
 * if the copy from userspace fails.
 */
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch, size, and subspace bits to get the register index */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					/* Not a KVM-visible extension: drop it */
					reg_val &= ~BIT(i);
					continue;
				}
				/* Force-clear bits that may not be enabled */
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				/* Force-set bits that may not be disabled */
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			/* Never enable what the host itself lacks */
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			/* FP state depends on the ISA bits; reset it */
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		/* Writable only with the host's own value */
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		/* Writable only with the host's own value */
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		/* Writing the current value is always a successful no-op */
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		/* Writing the current value is always a successful no-op */
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		/* Writing the current value is always a successful no-op */
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		/* satp_mode is fixed by the host; only accept a matching write */
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
271e98b1085SAnup Patel
/*
 * KVM_GET_ONE_REG handler for the CORE register space (GPRs, pc, mode).
 *
 * The GPR registers are read by indexing directly into the guest
 * context as an array of unsigned longs, so the uapi register layout
 * is assumed to mirror struct kvm_cpu_context's leading fields.
 */
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch, size, and subspace bits to get the register index */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	/* Index must fall within the uapi core register layout */
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		/* The guest pc is held in sepc */
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		/* GPRs ra..t6: raw indexed access into the context */
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		/* Privilege mode derived from sstatus.SPP */
		reg_val = (cntx->sstatus & SR_SPP) ?
			  KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
304e98b1085SAnup Patel
/*
 * KVM_SET_ONE_REG handler for the CORE register space (GPRs, pc, mode).
 *
 * Mirror of kvm_riscv_vcpu_get_reg_core(): pc maps to sepc, GPRs are
 * written by raw index into the guest context, and mode toggles
 * sstatus.SPP (S-mode when set, U-mode when clear).
 */
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch, size, and subspace bits to get the register index */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	/* Index must fall within the uapi core register layout */
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		/* GPRs ra..t6: raw indexed access into the context */
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}
339e98b1085SAnup Patel
/*
 * Read one general (non-AIA) guest CSR into *out_val.
 *
 * sip needs special handling: pending interrupts are first flushed into
 * csr->hvip, then the VS-level bits are shifted down into sip position
 * and combined with any non-local-IRQ bits.  All other CSRs are read by
 * raw index into struct kvm_vcpu_csr.
 */
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Index must fall within the uapi CSR layout */
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		/* Make hvip reflect currently pending interrupts first */
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}
358e98b1085SAnup Patel
/*
 * Write one general (non-AIA) guest CSR.
 *
 * For sip, the written value is masked to the valid VS interrupt bits
 * and shifted up into hvip position before being stored; the pending
 * interrupt mask is then cleared so the new sip value is authoritative.
 * All other CSRs are written by raw index into struct kvm_vcpu_csr.
 */
static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Index must fall within the uapi CSR layout */
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		/* Translate sip bits into their hvip positions */
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		/* Drop stale pending-IRQ updates superseded by this write */
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}
380e98b1085SAnup Patel
/*
 * KVM_GET_ONE_REG handler for the CSR register space.
 *
 * Splits the register number into subtype (GENERAL vs AIA) and index,
 * dispatches to the matching CSR reader, and copies the value out to
 * userspace on success.
 */
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch, size, and subspace bits to get subtype + index */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
416e98b1085SAnup Patel
kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)417e98b1085SAnup Patel static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
418e98b1085SAnup Patel const struct kvm_one_reg *reg)
419e98b1085SAnup Patel {
420e98b1085SAnup Patel int rc;
421e98b1085SAnup Patel unsigned long __user *uaddr =
422e98b1085SAnup Patel (unsigned long __user *)(unsigned long)reg->addr;
423e98b1085SAnup Patel unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
424e98b1085SAnup Patel KVM_REG_SIZE_MASK |
425e98b1085SAnup Patel KVM_REG_RISCV_CSR);
426e98b1085SAnup Patel unsigned long reg_val, reg_subtype;
427e98b1085SAnup Patel
428e98b1085SAnup Patel if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
429e98b1085SAnup Patel return -EINVAL;
430e98b1085SAnup Patel
431e98b1085SAnup Patel if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
432e98b1085SAnup Patel return -EFAULT;
433e98b1085SAnup Patel
434e98b1085SAnup Patel reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
435e98b1085SAnup Patel reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
436e98b1085SAnup Patel switch (reg_subtype) {
437e98b1085SAnup Patel case KVM_REG_RISCV_CSR_GENERAL:
438e98b1085SAnup Patel rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
439e98b1085SAnup Patel break;
440e98b1085SAnup Patel case KVM_REG_RISCV_CSR_AIA:
441e98b1085SAnup Patel rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
442e98b1085SAnup Patel break;
443e98b1085SAnup Patel default:
4442a88f38cSDaniel Henrique Barboza rc = -ENOENT;
445e98b1085SAnup Patel break;
446e98b1085SAnup Patel }
447e98b1085SAnup Patel if (rc)
448e98b1085SAnup Patel return rc;
449e98b1085SAnup Patel
450e98b1085SAnup Patel return 0;
451e98b1085SAnup Patel }
452e98b1085SAnup Patel
/*
 * Read the enable state (0 or 1) of a single KVM ISA extension into
 * *reg_val.
 *
 * Returns -ENOENT when reg_num is out of range or the extension is not
 * implemented by the host at all; otherwise *reg_val reflects whether
 * the extension is enabled in the vcpu's ISA bitmap.
 */
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	/* Extensions absent on the host are not visible to userspace */
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}
473e98b1085SAnup Patel
/*
 * Enable (reg_val == 1) or disable (reg_val == 0) a single KVM ISA
 * extension in the vcpu's ISA bitmap.
 *
 * Writing the current state is always a successful no-op.  Actual
 * changes are only permitted before the vcpu has run (-EBUSY after),
 * and only for extensions whose enable/disable is allowed; other
 * requests fail with -EINVAL.  -ENOENT is returned for out-of-range
 * extension IDs or extensions the host lacks.
 */
static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	/* No-op when the requested state matches the current state */
	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extension and a few single letter
		 * extension can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		/* FP state depends on the ISA bits; reset it */
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}
511e98b1085SAnup Patel
/*
 * Read up to BITS_PER_LONG extension enable states as a bitmask into
 * *reg_val, for multi-register index reg_num.
 *
 * NOTE(review): bits are OR-ed into *reg_val without clearing it first;
 * callers are expected to pass in a zeroed value (the caller in this
 * file does).  Per-extension -ENOENT results are silently treated as
 * "disabled".
 */
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}
53461302944SAnup Patel
/*
 * Enable or disable (per the 'enable' flag) every extension whose bit
 * is set in reg_val, for multi-register index reg_num.
 *
 * NOTE(review): the per-extension riscv_vcpu_set_isa_ext_single()
 * return value is deliberately ignored, making this a best-effort
 * bulk operation that always returns 0 for a valid reg_num.
 */
static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}
55461302944SAnup Patel
/*
 * KVM_GET_ONE_REG handler for the ISA_EXT register space.
 *
 * Supports the SINGLE subtype (one extension's 0/1 state) and the
 * MULTI_EN/MULTI_DIS subtypes (bitmask of up to BITS_PER_LONG
 * extensions; MULTI_DIS is simply the bitwise complement of MULTI_EN).
 */
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch, size, and subspace bits to get subtype + index */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	/* The multi helper ORs into reg_val, so start from zero */
	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		/* MULTI_DIS reports disabled extensions: invert the mask */
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
59461302944SAnup Patel
/*
 * KVM_SET_ONE_REG handler for the ISA_EXT register space.
 *
 * SINGLE writes one extension's 0/1 state; MULTI_EN enables and
 * MULTI_DIS disables every extension whose bit is set in the written
 * mask.  Unknown subtypes yield -ENOENT.
 */
static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch, size, and subspace bits to get subtype + index */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	/* Unreachable: every switch case above returns */
	return 0;
}
62761302944SAnup Patel
/*
 * Enumerate the CONFIG register IDs for KVM_GET_REG_LIST.
 *
 * Walks every slot of struct kvm_riscv_config, skipping the block-size
 * registers whose backing extension the vcpu does not have.  When
 * uindices is non-NULL, each register ID is copied out to userspace;
 * when NULL, the function only counts.
 *
 * Returns the number of registers, or -EFAULT on a failed copy.
 */
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
	     i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * was not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		/* Register size tracks the build's unsigned long width */
		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}
663031f9efaSHaibo Xu
num_config_regs(const struct kvm_vcpu * vcpu)664031f9efaSHaibo Xu static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
665031f9efaSHaibo Xu {
666031f9efaSHaibo Xu return copy_config_reg_indices(vcpu, NULL);
667031f9efaSHaibo Xu }
668031f9efaSHaibo Xu
num_core_regs(void)669031f9efaSHaibo Xu static inline unsigned long num_core_regs(void)
670031f9efaSHaibo Xu {
671031f9efaSHaibo Xu return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
672031f9efaSHaibo Xu }
673031f9efaSHaibo Xu
/*
 * Emit (or count, when @uindices is NULL) the indices of all CORE
 * registers. Returns the register count or -EFAULT.
 */
static int copy_core_reg_indices(u64 __user *uindices)
{
	const u64 size = IS_ENABLED(CONFIG_32BIT) ?
			 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices && put_user(reg, uindices++))
			return -EFAULT;
	}

	return n;
}
692031f9efaSHaibo Xu
num_csr_regs(const struct kvm_vcpu * vcpu)693031f9efaSHaibo Xu static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
694031f9efaSHaibo Xu {
695031f9efaSHaibo Xu unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
696031f9efaSHaibo Xu
697031f9efaSHaibo Xu if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
698031f9efaSHaibo Xu n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
699031f9efaSHaibo Xu
700031f9efaSHaibo Xu return n;
701031f9efaSHaibo Xu }
702031f9efaSHaibo Xu
/*
 * Emit (or count, when @uindices is NULL) the indices of all CSR
 * registers: first the general CSRs, then the AIA CSRs when Ssaia is
 * available. Returns the total count or -EFAULT.
 */
static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const u64 size = IS_ENABLED(CONFIG_32BIT) ?
			 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	int n_general = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n_aia = 0;

	/* General CSRs. */
	for (int i = 0; i < n_general; i++) {
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
			  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices && put_user(reg, uindices++))
			return -EFAULT;
	}

	/* AIA CSRs, exposed only when Ssaia is available. */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n_aia = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n_aia; i++) {
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices && put_user(reg, uindices++))
				return -EFAULT;
		}
	}

	return n_general + n_aia;
}
743031f9efaSHaibo Xu
num_timer_regs(void)744031f9efaSHaibo Xu static inline unsigned long num_timer_regs(void)
745031f9efaSHaibo Xu {
746031f9efaSHaibo Xu return sizeof(struct kvm_riscv_timer) / sizeof(u64);
747031f9efaSHaibo Xu }
748031f9efaSHaibo Xu
/*
 * Emit (or count, when @uindices is NULL) the indices of all TIMER
 * registers; these are always 64-bit. Returns the count or -EFAULT.
 */
static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices && put_user(reg, uindices++))
			return -EFAULT;
	}

	return n;
}
766031f9efaSHaibo Xu
num_fp_f_regs(const struct kvm_vcpu * vcpu)767031f9efaSHaibo Xu static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
768031f9efaSHaibo Xu {
769031f9efaSHaibo Xu const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
770031f9efaSHaibo Xu
771031f9efaSHaibo Xu if (riscv_isa_extension_available(vcpu->arch.isa, f))
772031f9efaSHaibo Xu return sizeof(cntx->fp.f) / sizeof(u32);
773031f9efaSHaibo Xu else
774031f9efaSHaibo Xu return 0;
775031f9efaSHaibo Xu }
776031f9efaSHaibo Xu
/*
 * Emit (or count, when @uindices is NULL) the indices of all FP_F
 * registers (32-bit each). Returns the count or -EFAULT.
 */
static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices && put_user(reg, uindices++))
			return -EFAULT;
	}

	return n;
}
795031f9efaSHaibo Xu
num_fp_d_regs(const struct kvm_vcpu * vcpu)796031f9efaSHaibo Xu static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
797031f9efaSHaibo Xu {
798031f9efaSHaibo Xu const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
799031f9efaSHaibo Xu
800031f9efaSHaibo Xu if (riscv_isa_extension_available(vcpu->arch.isa, d))
801031f9efaSHaibo Xu return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
802031f9efaSHaibo Xu else
803031f9efaSHaibo Xu return 0;
804031f9efaSHaibo Xu }
805031f9efaSHaibo Xu
/*
 * Emit (or count, when @uindices is NULL) the indices of all FP_D
 * registers: f0..f31 as u64 regs followed by fcsr as a u32 reg.
 * Returns the count or -EFAULT.
 */
static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/*
	 * Fix: when the D extension is absent (n == 0), the old code
	 * skipped the loop but still wrote one fcsr index to user memory
	 * while reporting zero registers. Bail out early instead.
	 */
	if (!n)
		return 0;

	/* copy fp.d.f indices */
	for (i = 0; i < n - 1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices && put_user(reg, uindices++))
			return -EFAULT;
	}

	/* copy fp.d.fcsr index (32-bit, uses the last slot i == n - 1) */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices && put_user(reg, uindices++))
		return -EFAULT;

	return n;
}
835031f9efaSHaibo Xu
/*
 * Emit (or count, when @uindices is NULL) the indices of all ISA_EXT
 * registers, skipping extensions the host does not implement.
 * Returns the count or -EFAULT.
 */
static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	const u64 size = IS_ENABLED(CONFIG_32BIT) ?
			 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		unsigned long host_ext = kvm_isa_ext_arr[i];

		/* Only report extensions the host actually supports. */
		if (!__riscv_isa_extension_available(NULL, host_ext))
			continue;

		if (uindices) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_ISA_EXT | i;

			if (put_user(reg, uindices++))
				return -EFAULT;
		}

		n++;
	}

	return n;
}
862031f9efaSHaibo Xu
num_isa_ext_regs(const struct kvm_vcpu * vcpu)863031f9efaSHaibo Xu static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
864031f9efaSHaibo Xu {
865031f9efaSHaibo Xu return copy_isa_ext_reg_indices(vcpu, NULL);;
866031f9efaSHaibo Xu }
867031f9efaSHaibo Xu
num_sbi_ext_regs(void)868031f9efaSHaibo Xu static inline unsigned long num_sbi_ext_regs(void)
869031f9efaSHaibo Xu {
870031f9efaSHaibo Xu /*
871031f9efaSHaibo Xu * number of KVM_REG_RISCV_SBI_SINGLE +
872031f9efaSHaibo Xu * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
873031f9efaSHaibo Xu */
874031f9efaSHaibo Xu return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
875031f9efaSHaibo Xu }
876031f9efaSHaibo Xu
/*
 * Emit (or count, when @uindices is NULL) the indices of all SBI_EXT
 * registers: the SINGLE regs first, then an EN/DIS pair per MULTI
 * bitmap word. Returns the total count or -EFAULT.
 */
static int copy_sbi_ext_reg_indices(u64 __user *uindices)
{
	const u64 size = IS_ENABLED(CONFIG_32BIT) ?
			 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;

	/* KVM_REG_RISCV_SBI_SINGLE */
	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (uindices && put_user(reg, uindices++))
			return -EFAULT;
	}

	/* KVM_REG_RISCV_SBI_MULTI: EN then DIS for every bitmap word */
	for (int i = 0; i <= KVM_REG_RISCV_SBI_MULTI_REG_LAST; i++) {
		u64 reg_en = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			     KVM_REG_RISCV_SBI_MULTI_EN | i;
		u64 reg_dis = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			      KVM_REG_RISCV_SBI_MULTI_DIS | i;

		if (uindices) {
			if (put_user(reg_en, uindices++))
				return -EFAULT;
			if (put_user(reg_dis, uindices++))
				return -EFAULT;
		}
	}

	return num_sbi_ext_regs();
}
922031f9efaSHaibo Xu
/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via
 * KVM_GET/SET_ONE_REG.
 *
 * This is the sum over every register class; it must stay in sync with
 * kvm_riscv_vcpu_copy_reg_indices().
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	return num_config_regs(vcpu) +
	       num_core_regs() +
	       num_csr_regs(vcpu) +
	       num_timer_regs() +
	       num_fp_f_regs(vcpu) +
	       num_fp_d_regs(vcpu) +
	       num_isa_ext_regs(vcpu) +
	       num_sbi_ext_regs();
}
943031f9efaSHaibo Xu
944031f9efaSHaibo Xu /*
945031f9efaSHaibo Xu * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
946031f9efaSHaibo Xu */
kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu * vcpu,u64 __user * uindices)947031f9efaSHaibo Xu int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
948031f9efaSHaibo Xu u64 __user *uindices)
949031f9efaSHaibo Xu {
950031f9efaSHaibo Xu int ret;
951031f9efaSHaibo Xu
952031f9efaSHaibo Xu ret = copy_config_reg_indices(vcpu, uindices);
953031f9efaSHaibo Xu if (ret < 0)
954031f9efaSHaibo Xu return ret;
955031f9efaSHaibo Xu uindices += ret;
956031f9efaSHaibo Xu
957031f9efaSHaibo Xu ret = copy_core_reg_indices(uindices);
958031f9efaSHaibo Xu if (ret < 0)
959031f9efaSHaibo Xu return ret;
960031f9efaSHaibo Xu uindices += ret;
961031f9efaSHaibo Xu
962031f9efaSHaibo Xu ret = copy_csr_reg_indices(vcpu, uindices);
963031f9efaSHaibo Xu if (ret < 0)
964031f9efaSHaibo Xu return ret;
965031f9efaSHaibo Xu uindices += ret;
966031f9efaSHaibo Xu
967031f9efaSHaibo Xu ret = copy_timer_reg_indices(uindices);
968031f9efaSHaibo Xu if (ret < 0)
969031f9efaSHaibo Xu return ret;
970031f9efaSHaibo Xu uindices += ret;
971031f9efaSHaibo Xu
972031f9efaSHaibo Xu ret = copy_fp_f_reg_indices(vcpu, uindices);
973031f9efaSHaibo Xu if (ret < 0)
974031f9efaSHaibo Xu return ret;
975031f9efaSHaibo Xu uindices += ret;
976031f9efaSHaibo Xu
977031f9efaSHaibo Xu ret = copy_fp_d_reg_indices(vcpu, uindices);
978031f9efaSHaibo Xu if (ret < 0)
979031f9efaSHaibo Xu return ret;
980031f9efaSHaibo Xu uindices += ret;
981031f9efaSHaibo Xu
982031f9efaSHaibo Xu ret = copy_isa_ext_reg_indices(vcpu, uindices);
983031f9efaSHaibo Xu if (ret < 0)
984031f9efaSHaibo Xu return ret;
985031f9efaSHaibo Xu uindices += ret;
986031f9efaSHaibo Xu
987031f9efaSHaibo Xu ret = copy_sbi_ext_reg_indices(uindices);
988031f9efaSHaibo Xu if (ret < 0)
989031f9efaSHaibo Xu return ret;
990031f9efaSHaibo Xu
991031f9efaSHaibo Xu return 0;
992031f9efaSHaibo Xu }
993031f9efaSHaibo Xu
kvm_riscv_vcpu_set_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)994e98b1085SAnup Patel int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
995e98b1085SAnup Patel const struct kvm_one_reg *reg)
996e98b1085SAnup Patel {
997e98b1085SAnup Patel switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
998e98b1085SAnup Patel case KVM_REG_RISCV_CONFIG:
999e98b1085SAnup Patel return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
1000e98b1085SAnup Patel case KVM_REG_RISCV_CORE:
1001e98b1085SAnup Patel return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
1002e98b1085SAnup Patel case KVM_REG_RISCV_CSR:
1003e98b1085SAnup Patel return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
1004e98b1085SAnup Patel case KVM_REG_RISCV_TIMER:
1005e98b1085SAnup Patel return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
1006e98b1085SAnup Patel case KVM_REG_RISCV_FP_F:
1007e98b1085SAnup Patel return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1008e98b1085SAnup Patel KVM_REG_RISCV_FP_F);
1009e98b1085SAnup Patel case KVM_REG_RISCV_FP_D:
1010e98b1085SAnup Patel return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1011e98b1085SAnup Patel KVM_REG_RISCV_FP_D);
1012e98b1085SAnup Patel case KVM_REG_RISCV_ISA_EXT:
1013e98b1085SAnup Patel return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
1014e98b1085SAnup Patel case KVM_REG_RISCV_SBI_EXT:
1015e98b1085SAnup Patel return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
1016e98b1085SAnup Patel case KVM_REG_RISCV_VECTOR:
1017630b4ceeSAndrew Jones return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
1018e98b1085SAnup Patel default:
1019e98b1085SAnup Patel break;
1020e98b1085SAnup Patel }
1021e98b1085SAnup Patel
10222a88f38cSDaniel Henrique Barboza return -ENOENT;
1023e98b1085SAnup Patel }
1024e98b1085SAnup Patel
kvm_riscv_vcpu_get_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)1025e98b1085SAnup Patel int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
1026e98b1085SAnup Patel const struct kvm_one_reg *reg)
1027e98b1085SAnup Patel {
1028e98b1085SAnup Patel switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1029e98b1085SAnup Patel case KVM_REG_RISCV_CONFIG:
1030e98b1085SAnup Patel return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
1031e98b1085SAnup Patel case KVM_REG_RISCV_CORE:
1032e98b1085SAnup Patel return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
1033e98b1085SAnup Patel case KVM_REG_RISCV_CSR:
1034e98b1085SAnup Patel return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
1035e98b1085SAnup Patel case KVM_REG_RISCV_TIMER:
1036e98b1085SAnup Patel return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
1037e98b1085SAnup Patel case KVM_REG_RISCV_FP_F:
1038e98b1085SAnup Patel return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1039e98b1085SAnup Patel KVM_REG_RISCV_FP_F);
1040e98b1085SAnup Patel case KVM_REG_RISCV_FP_D:
1041e98b1085SAnup Patel return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1042e98b1085SAnup Patel KVM_REG_RISCV_FP_D);
1043e98b1085SAnup Patel case KVM_REG_RISCV_ISA_EXT:
1044e98b1085SAnup Patel return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
1045e98b1085SAnup Patel case KVM_REG_RISCV_SBI_EXT:
1046e98b1085SAnup Patel return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
1047e98b1085SAnup Patel case KVM_REG_RISCV_VECTOR:
1048630b4ceeSAndrew Jones return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
1049e98b1085SAnup Patel default:
1050e98b1085SAnup Patel break;
1051e98b1085SAnup Patel }
1052e98b1085SAnup Patel
10532a88f38cSDaniel Henrique Barboza return -ENOENT;
1054e98b1085SAnup Patel }
1055