1dea8ee31SAtish Patra // SPDX-License-Identifier: GPL-2.0
20e2e6419SRandy Dunlap /*
3dea8ee31SAtish Patra * Copyright (c) 2019 Western Digital Corporation or its affiliates.
4dea8ee31SAtish Patra *
5dea8ee31SAtish Patra * Authors:
6dea8ee31SAtish Patra * Atish Patra <atish.patra@wdc.com>
7dea8ee31SAtish Patra */
8dea8ee31SAtish Patra
9dea8ee31SAtish Patra #include <linux/errno.h>
10dea8ee31SAtish Patra #include <linux/err.h>
11dea8ee31SAtish Patra #include <linux/kvm_host.h>
12dea8ee31SAtish Patra #include <asm/sbi.h>
13cf70be9dSAtish Patra #include <asm/kvm_vcpu_sbi.h>
14dea8ee31SAtish Patra
#ifndef CONFIG_RISCV_SBI_V01
/*
 * Stub used when SBI v0.1 support is compiled out: the impossible extid
 * range (-1UL .. -1UL) never matches a guest SBI call, and the NULL
 * handler ensures kvm_riscv_vcpu_sbi_ecall() forwards such calls as
 * unsupported instead of dispatching here.
 */
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif
225f862df5SAtish Patra
#ifndef CONFIG_RISCV_PMU_SBI
/*
 * Stub used when the SBI PMU driver is compiled out; same never-matching
 * extid range trick as the v0.1 stub above so PMU calls are treated as
 * unsupported.
 */
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif
30cbddc4c4SAtish Patra
/*
 * Associates a user-space visible SBI extension ID (used as the index
 * into vcpu->arch.sbi_context.ext_status[]) with its implementation.
 */
struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;	/* index into ext_status[] */
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};
3596b3d4bdSAnup Patel
/*
 * Table of all SBI extensions known to KVM. Entries whose ext_idx is
 * KVM_RISCV_SBI_EXT_MAX (the base extension) have no ext_status[] slot
 * and therefore can never be disabled by user space.
 */
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};
78a046c2d8SAtish Patra
/*
 * Forward the current SBI call to user space via a KVM_EXIT_RISCV_SBI
 * exit. The ret[] slots are pre-loaded with SBI_ERR_NOT_SUPPORTED/0 so
 * that a user space which resumes the VCPU without filling them in
 * still hands a sane error back to the guest.
 */
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Arm kvm_riscv_vcpu_sbi_return() to copy back the result once */
	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	/* SBI calling convention: a7 = extension ID, a6 = function ID */
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	/* a0-a5 carry the call arguments */
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	/* Default result in case user space does not overwrite it */
	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
	run->riscv_sbi.ret[1] = 0;
}
97dea8ee31SAtish Patra
/*
 * Stop all VCPUs of this VM and report a system event (shutdown/reset)
 * of @type with @reason to user space via KVM_EXIT_SYSTEM_EVENT.
 */
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	/*
	 * Each mp_state update must be done under the *target* VCPU's own
	 * mp_state_lock (tmp, not the calling vcpu); locking the caller's
	 * lock for every iteration would leave the other VCPUs' mp_state
	 * writes unprotected against concurrent updaters.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
1184b11d865SAnup Patel
kvm_riscv_vcpu_sbi_return(struct kvm_vcpu * vcpu,struct kvm_run * run)119dea8ee31SAtish Patra int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
120dea8ee31SAtish Patra {
121dea8ee31SAtish Patra struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
122dea8ee31SAtish Patra
123dea8ee31SAtish Patra /* Handle SBI return only once */
124dea8ee31SAtish Patra if (vcpu->arch.sbi_context.return_handled)
125dea8ee31SAtish Patra return 0;
126dea8ee31SAtish Patra vcpu->arch.sbi_context.return_handled = 1;
127dea8ee31SAtish Patra
128dea8ee31SAtish Patra /* Update return values */
129dea8ee31SAtish Patra cp->a0 = run->riscv_sbi.ret[0];
130dea8ee31SAtish Patra cp->a1 = run->riscv_sbi.ret[1];
131dea8ee31SAtish Patra
132dea8ee31SAtish Patra /* Move to next instruction */
133dea8ee31SAtish Patra vcpu->arch.guest_context.sepc += 4;
134dea8ee31SAtish Patra
135dea8ee31SAtish Patra return 0;
136dea8ee31SAtish Patra }
137dea8ee31SAtish Patra
riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long reg_val)13896b3d4bdSAnup Patel static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
13996b3d4bdSAnup Patel unsigned long reg_num,
14096b3d4bdSAnup Patel unsigned long reg_val)
141cf70be9dSAtish Patra {
14296b3d4bdSAnup Patel unsigned long i;
14396b3d4bdSAnup Patel const struct kvm_riscv_sbi_extension_entry *sext = NULL;
14496b3d4bdSAnup Patel struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
14596b3d4bdSAnup Patel
1462a88f38cSDaniel Henrique Barboza if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
1472a88f38cSDaniel Henrique Barboza return -ENOENT;
1482a88f38cSDaniel Henrique Barboza
1492a88f38cSDaniel Henrique Barboza if (reg_val != 1 && reg_val != 0)
15096b3d4bdSAnup Patel return -EINVAL;
151cf70be9dSAtish Patra
152cf70be9dSAtish Patra for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
153ae328dadSAndrew Jones if (sbi_ext[i].ext_idx == reg_num) {
15496b3d4bdSAnup Patel sext = &sbi_ext[i];
15596b3d4bdSAnup Patel break;
15696b3d4bdSAnup Patel }
15796b3d4bdSAnup Patel }
15896b3d4bdSAnup Patel if (!sext)
15996b3d4bdSAnup Patel return -ENOENT;
16096b3d4bdSAnup Patel
16195c99104SAndrew Jones /*
16295c99104SAndrew Jones * We can't set the extension status to available here, since it may
16395c99104SAndrew Jones * have a probe() function which needs to confirm availability first,
16495c99104SAndrew Jones * but it may be too early to call that here. We can set the status to
16595c99104SAndrew Jones * unavailable, though.
16695c99104SAndrew Jones */
16795c99104SAndrew Jones if (!reg_val)
16895c99104SAndrew Jones scontext->ext_status[sext->ext_idx] =
16995c99104SAndrew Jones KVM_RISCV_SBI_EXT_UNAVAILABLE;
17096b3d4bdSAnup Patel
17196b3d4bdSAnup Patel return 0;
17296b3d4bdSAnup Patel }
17396b3d4bdSAnup Patel
riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long * reg_val)17496b3d4bdSAnup Patel static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
17596b3d4bdSAnup Patel unsigned long reg_num,
17696b3d4bdSAnup Patel unsigned long *reg_val)
17796b3d4bdSAnup Patel {
17896b3d4bdSAnup Patel unsigned long i;
17996b3d4bdSAnup Patel const struct kvm_riscv_sbi_extension_entry *sext = NULL;
18096b3d4bdSAnup Patel struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
18196b3d4bdSAnup Patel
18296b3d4bdSAnup Patel if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
1832a88f38cSDaniel Henrique Barboza return -ENOENT;
18496b3d4bdSAnup Patel
18596b3d4bdSAnup Patel for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
186ae328dadSAndrew Jones if (sbi_ext[i].ext_idx == reg_num) {
18796b3d4bdSAnup Patel sext = &sbi_ext[i];
18896b3d4bdSAnup Patel break;
18996b3d4bdSAnup Patel }
19096b3d4bdSAnup Patel }
19196b3d4bdSAnup Patel if (!sext)
19296b3d4bdSAnup Patel return -ENOENT;
19396b3d4bdSAnup Patel
19495c99104SAndrew Jones /*
19595c99104SAndrew Jones * If the extension status is still uninitialized, then we should probe
19695c99104SAndrew Jones * to determine if it's available, but it may be too early to do that
19795c99104SAndrew Jones * here. The best we can do is report that the extension has not been
19895c99104SAndrew Jones * disabled, i.e. we return 1 when the extension is available and also
19995c99104SAndrew Jones * when it only may be available.
20095c99104SAndrew Jones */
20195c99104SAndrew Jones *reg_val = scontext->ext_status[sext->ext_idx] !=
20295c99104SAndrew Jones KVM_RISCV_SBI_EXT_UNAVAILABLE;
20396b3d4bdSAnup Patel
20496b3d4bdSAnup Patel return 0;
20596b3d4bdSAnup Patel }
20696b3d4bdSAnup Patel
/*
 * KVM_SET_ONE_REG handler for a MULTI_EN/MULTI_DIS bitmap register:
 * every set bit in @reg_val selects one extension to @enable or disable.
 */
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long bit, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (bit = 0; bit < BITS_PER_LONG; bit++) {
		if (!(reg_val & BIT(bit)))
			continue;

		ext_id = reg_num * BITS_PER_LONG + bit;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		/* Per-extension failures are deliberately ignored */
		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}
22696b3d4bdSAnup Patel
riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long * reg_val)22796b3d4bdSAnup Patel static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
22896b3d4bdSAnup Patel unsigned long reg_num,
22996b3d4bdSAnup Patel unsigned long *reg_val)
23096b3d4bdSAnup Patel {
23196b3d4bdSAnup Patel unsigned long i, ext_id, ext_val;
23296b3d4bdSAnup Patel
23396b3d4bdSAnup Patel if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
2342a88f38cSDaniel Henrique Barboza return -ENOENT;
23596b3d4bdSAnup Patel
23696b3d4bdSAnup Patel for (i = 0; i < BITS_PER_LONG; i++) {
23796b3d4bdSAnup Patel ext_id = i + reg_num * BITS_PER_LONG;
23896b3d4bdSAnup Patel if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
23996b3d4bdSAnup Patel break;
24096b3d4bdSAnup Patel
24196b3d4bdSAnup Patel ext_val = 0;
24296b3d4bdSAnup Patel riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
24396b3d4bdSAnup Patel if (ext_val)
24496b3d4bdSAnup Patel *reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
24596b3d4bdSAnup Patel }
24696b3d4bdSAnup Patel
24796b3d4bdSAnup Patel return 0;
24896b3d4bdSAnup Patel }
24996b3d4bdSAnup Patel
/*
 * KVM_SET_ONE_REG entry point for the SBI extension register space.
 * Decodes the register subtype (SINGLE / MULTI_EN / MULTI_DIS) and
 * dispatches. Changes are rejected with -EBUSY once the VCPU has run.
 */
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch/size/SBI_EXT bits; what remains is subtype + number */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	/* Extension set may only change before the first VCPU run */
	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	/* Not reached: every switch case returns */
	return 0;
}
28596b3d4bdSAnup Patel
/*
 * KVM_GET_ONE_REG entry point for the SBI extension register space.
 * Decodes the register subtype, gathers the value, and copies it out to
 * user space. MULTI_DIS is the bitwise complement of MULTI_EN.
 */
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	/* Strip arch/size/SBI_EXT bits; what remains is subtype + number */
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		/* DIS view is just the inverted EN bitmap */
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
32596b3d4bdSAnup Patel
/*
 * Find the SBI extension covering @extid, lazily resolving availability.
 *
 * Status handling per matching entry:
 *  - ext_idx >= KVM_RISCV_SBI_EXT_MAX: not user-disableable, always found;
 *  - AVAILABLE: return it; UNAVAILABLE: hidden, return NULL;
 *  - uninitialized: run probe() now (first use) and cache the outcome.
 */
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_AVAILABLE)
				return ext;
			if (scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_UNAVAILABLE)
				return NULL;
			/* Uninitialized: probe once and cache the result */
			if (ext->probe && !ext->probe(vcpu)) {
				scontext->ext_status[entry->ext_idx] =
					KVM_RISCV_SBI_EXT_UNAVAILABLE;
				return NULL;
			}

			scontext->ext_status[entry->ext_idx] =
				KVM_RISCV_SBI_EXT_AVAILABLE;
			return ext;
		}
	}

	return NULL;
}
360cf70be9dSAtish Patra
/*
 * Top-level handler for a guest SBI ecall trap.
 *
 * Return value protocol: 1 = continue the VCPU run loop, 0 = exit to
 * user space, < 0 = error forwarded to the ioctl caller. sepc is only
 * advanced past the ecall when the call completed (next_sepc), and a1
 * is only written for non-v0.1 extensions when the loop continues
 * (v0.1 calls return a single value in a0).
 */
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	/* a7 carries the SBI extension ID */
	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		/* Legacy v0.1 calls occupy a dedicated extid range */
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, it exits the ioctl
	 * loop and forwards the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases i.e trap, exit or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit ioctl loop or Propagate the error code the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}
425