// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *      Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_aia_imsic.h>

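/*
 * Helpers to serialize against VCPU execution: lock_all_vcpus() tries to
 * grab every VCPU mutex (bailing out if any of them is already held) and
 * unlock_vcpus()/unlock_all_vcpus() release them again in reverse order.
 */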
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
        struct kvm_vcpu *tmp_vcpu;

        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&tmp_vcpu->mutex);
        }
}

static void unlock_all_vcpus(struct kvm *kvm)
{
        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

static bool lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *tmp_vcpu;
        unsigned long c;

        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
                if (!mutex_trylock(&tmp_vcpu->mutex)) {
                        unlock_vcpus(kvm, c - 1);
                        return false;
                }
        }

        return true;
}

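/*
 * Create the in-kernel AIA irqchip device. This only succeeds when no
 * other in-kernel irqchip exists and none of the VCPUs has run yet,
 * which is checked while holding all VCPU mutexes.
 */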
static int aia_create(struct kvm_device *dev, u32 type)
{
        int ret;
        unsigned long i;
        struct kvm *kvm = dev->kvm;
        struct kvm_vcpu *vcpu;

        if (irqchip_in_kernel(kvm))
                return -EEXIST;

        ret = -EBUSY;
        if (!lock_all_vcpus(kvm))
                return ret;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->arch.ran_atleast_once)
                        goto out_unlock;
        }
        ret = 0;

        kvm->arch.aia.in_kernel = true;

out_unlock:
        unlock_all_vcpus(kvm);
        return ret;
}

static void aia_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

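/*
 * Read or write a single AIA configuration parameter (mode, number of
 * IDs, number of wired sources, group/hart/guest index bits) selected
 * by @type. Writes are validated and only allowed before the irqchip
 * is initialized.
 */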
static int aia_config(struct kvm *kvm, unsigned long type,
                      u32 *nr, bool write)
{
        struct kvm_aia *aia = &kvm->arch.aia;

        /* Writes can only be done before irqchip is initialized */
        if (write && kvm_riscv_aia_initialized(kvm))
                return -EBUSY;

        switch (type) {
        case KVM_DEV_RISCV_AIA_CONFIG_MODE:
                if (write) {
                        switch (*nr) {
                        case KVM_DEV_RISCV_AIA_MODE_EMUL:
                                break;
                        case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
                        case KVM_DEV_RISCV_AIA_MODE_AUTO:
                                /*
                                 * HW acceleration and auto modes are only
                                 * supported on hosts with a non-zero number
                                 * of guest external interrupts (i.e.
                                 * non-zero VS-level IMSIC pages).
                                 */
                                if (!kvm_riscv_aia_nr_hgei)
                                        return -EINVAL;
                                break;
                        default:
                                return -EINVAL;
                        }
                        aia->mode = *nr;
                } else
                        *nr = aia->mode;
                break;
        case KVM_DEV_RISCV_AIA_CONFIG_IDS:
                if (write) {
                        if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) ||
                            (*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) ||
                            ((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) !=
                             KVM_DEV_RISCV_AIA_IDS_MIN) ||
                            (kvm_riscv_aia_max_ids <= *nr))
                                return -EINVAL;
                        aia->nr_ids = *nr;
                } else
                        *nr = aia->nr_ids;
                break;
        case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
                if (write) {
                        if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) ||
                            (*nr >= kvm_riscv_aia_max_ids))
                                return -EINVAL;
                        aia->nr_sources = *nr;
                } else
                        *nr = aia->nr_sources;
                break;
        case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
                if (write) {
                        if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX)
                                return -EINVAL;
                        aia->nr_group_bits = *nr;
                } else
                        *nr = aia->nr_group_bits;
                break;
        case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
                if (write) {
                        if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) ||
                            (*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX))
                                return -EINVAL;
                        aia->nr_group_shift = *nr;
                } else
                        *nr = aia->nr_group_shift;
                break;
        case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
                if (write) {
                        if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX)
                                return -EINVAL;
                        aia->nr_hart_bits = *nr;
                } else
                        *nr = aia->nr_hart_bits;
                break;
        case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
                if (write) {
                        if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX)
                                return -EINVAL;
                        aia->nr_guest_bits = *nr;
                } else
                        *nr = aia->nr_guest_bits;
                break;
        default:
                return -ENXIO;
        }

        return 0;
}

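/*
 * Accessors for the guest physical addresses of the APLIC and of the
 * per-VCPU IMSIC pages. The addresses must be suitably aligned and can
 * only be changed before the irqchip is initialized.
 */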
static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write)
{
        struct kvm_aia *aia = &kvm->arch.aia;

        if (write) {
                /* Writes can only be done before irqchip is initialized */
                if (kvm_riscv_aia_initialized(kvm))
                        return -EBUSY;

                if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1))
                        return -EINVAL;

                aia->aplic_addr = *addr;
        } else
                *addr = aia->aplic_addr;

        return 0;
}

static int aia_imsic_addr(struct kvm *kvm, u64 *addr,
                          unsigned long vcpu_idx, bool write)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_aia *vcpu_aia;

        vcpu = kvm_get_vcpu(kvm, vcpu_idx);
        if (!vcpu)
                return -EINVAL;
        vcpu_aia = &vcpu->arch.aia_context;

        if (write) {
                /* Writes can only be done before irqchip is initialized */
                if (kvm_riscv_aia_initialized(kvm))
                        return -EBUSY;

                if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1))
                        return -EINVAL;
        }

        mutex_lock(&vcpu->mutex);
        if (write)
                vcpu_aia->imsic_addr = *addr;
        else
                *addr = vcpu_aia->imsic_addr;
        mutex_unlock(&vcpu->mutex);

        return 0;
}

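/*
 * Compute the common base PPN of an IMSIC address by masking out the
 * page offset, guest and hart index bits and (if configured) the group
 * index bits; aia_init() requires all VCPUs to share this base PPN.
 */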
static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
{
        u32 h, l;
        gpa_t mask = 0;

        h = aia->nr_hart_bits + aia->nr_guest_bits +
            IMSIC_MMIO_PAGE_SHIFT - 1;
        mask = GENMASK_ULL(h, 0);

        if (aia->nr_group_bits) {
                h = aia->nr_group_bits + aia->nr_group_shift - 1;
                l = aia->nr_group_shift;
                mask |= GENMASK_ULL(h, l);
        }

        return (addr & ~mask) >> IMSIC_MMIO_PAGE_SHIFT;
}

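/*
 * Recover the hart index encoded in an IMSIC address: the hart bits sit
 * just above the guest index bits, and the optional group bits are
 * folded in as the most significant part of the index.
 */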
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
        u32 hart = 0, group = 0;

        if (aia->nr_hart_bits)
                hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
                       GENMASK_ULL(aia->nr_hart_bits - 1, 0);
        if (aia->nr_group_bits)
                group = (addr >> aia->nr_group_shift) &
                        GENMASK_ULL(aia->nr_group_bits - 1, 0);

        return (group << aia->nr_hart_bits) | hart;
}

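/*
 * Finalize the irqchip: validate the configuration, initialize the
 * APLIC, derive each VCPU's hart index from its IMSIC address, and
 * initialize the per-VCPU IMSICs. On failure, everything set up so far
 * is torn down again.
 */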
static int aia_init(struct kvm *kvm)
{
        int ret, i;
        unsigned long idx;
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_aia *vaia;
        struct kvm_aia *aia = &kvm->arch.aia;
        gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;

        /* Irqchip can be initialized only once */
        if (kvm_riscv_aia_initialized(kvm))
                return -EBUSY;

        /* We might be in the middle of creating a VCPU? */
        if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
                return -EBUSY;

        /* Number of sources should be less than or equal to number of IDs */
        if (aia->nr_ids < aia->nr_sources)
                return -EINVAL;

        /* APLIC base is required for non-zero number of sources */
        if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
                return -EINVAL;

        /* Initialize APLIC */
        ret = kvm_riscv_aia_aplic_init(kvm);
        if (ret)
                return ret;

        /* Iterate over each VCPU */
        kvm_for_each_vcpu(idx, vcpu, kvm) {
                vaia = &vcpu->arch.aia_context;

                /* IMSIC base is required */
                if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
                        ret = -EINVAL;
                        goto fail_cleanup_imsics;
                }

                /* All IMSICs should have matching base PPN */
                if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
                        base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
                if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
                        ret = -EINVAL;
                        goto fail_cleanup_imsics;
                }

                /* Update HART index of the IMSIC based on IMSIC base */
                vaia->hart_index = aia_imsic_hart_index(aia,
                                                        vaia->imsic_addr);

                /* Initialize IMSIC for this VCPU */
                ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
                if (ret)
                        goto fail_cleanup_imsics;
        }

        /* Set the initialized flag */
        kvm->arch.aia.initialized = true;

        return 0;

fail_cleanup_imsics:
        for (i = idx - 1; i >= 0; i--) {
                vcpu = kvm_get_vcpu(kvm, i);
                if (!vcpu)
                        continue;
                kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
        }
        kvm_riscv_aia_aplic_cleanup(kvm);
        return ret;
}

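/*
 * KVM_SET_DEVICE_ATTR handler: dispatch on the attribute group, copy the
 * attribute payload from user space, and apply it under kvm->lock.
 *
 * Illustrative userspace usage (a sketch, not part of this file): the AIA
 * device fd obtained via KVM_CREATE_DEVICE would be configured roughly as
 *
 *      __u32 mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_RISCV_AIA_GRP_CONFIG,
 *              .attr  = KVM_DEV_RISCV_AIA_CONFIG_MODE,
 *              .addr  = (__u64)(unsigned long)&mode,
 *      };
 *      ioctl(aia_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * followed by KVM_DEV_RISCV_AIA_GRP_ADDR writes and a final
 * KVM_DEV_RISCV_AIA_CTRL_INIT.
 */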
static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        u32 nr;
        u64 addr;
        int nr_vcpus, r = -ENXIO;
        unsigned long v, type = (unsigned long)attr->attr;
        void __user *uaddr = (void __user *)(long)attr->addr;

        switch (attr->group) {
        case KVM_DEV_RISCV_AIA_GRP_CONFIG:
                if (copy_from_user(&nr, uaddr, sizeof(nr)))
                        return -EFAULT;

                mutex_lock(&dev->kvm->lock);
                r = aia_config(dev->kvm, type, &nr, true);
                mutex_unlock(&dev->kvm->lock);

                break;

        case KVM_DEV_RISCV_AIA_GRP_ADDR:
                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
                mutex_lock(&dev->kvm->lock);
                if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
                        r = aia_aplic_addr(dev->kvm, &addr, true);
                else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
                        r = aia_imsic_addr(dev->kvm, &addr,
                                           type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
                mutex_unlock(&dev->kvm->lock);

                break;

        case KVM_DEV_RISCV_AIA_GRP_CTRL:
                switch (type) {
                case KVM_DEV_RISCV_AIA_CTRL_INIT:
                        mutex_lock(&dev->kvm->lock);
                        r = aia_init(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        break;
                }

                break;
        case KVM_DEV_RISCV_AIA_GRP_APLIC:
                if (copy_from_user(&nr, uaddr, sizeof(nr)))
                        return -EFAULT;

                mutex_lock(&dev->kvm->lock);
                r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
                mutex_unlock(&dev->kvm->lock);

                break;
        case KVM_DEV_RISCV_AIA_GRP_IMSIC:
                if (copy_from_user(&v, uaddr, sizeof(v)))
                        return -EFAULT;

                mutex_lock(&dev->kvm->lock);
                r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
                mutex_unlock(&dev->kvm->lock);

                break;
        }

        return r;
}

static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        u32 nr;
        u64 addr;
        int nr_vcpus, r = -ENXIO;
        void __user *uaddr = (void __user *)(long)attr->addr;
        unsigned long v, type = (unsigned long)attr->attr;

        switch (attr->group) {
        case KVM_DEV_RISCV_AIA_GRP_CONFIG:
                if (copy_from_user(&nr, uaddr, sizeof(nr)))
                        return -EFAULT;

                mutex_lock(&dev->kvm->lock);
                r = aia_config(dev->kvm, type, &nr, false);
                mutex_unlock(&dev->kvm->lock);
                if (r)
                        return r;

                if (copy_to_user(uaddr, &nr, sizeof(nr)))
                        return -EFAULT;

                break;
        case KVM_DEV_RISCV_AIA_GRP_ADDR:
                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
                mutex_lock(&dev->kvm->lock);
                if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
                        r = aia_aplic_addr(dev->kvm, &addr, false);
                else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
                        r = aia_imsic_addr(dev->kvm, &addr,
                                           type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
                mutex_unlock(&dev->kvm->lock);
                if (r)
                        return r;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;

                break;
        case KVM_DEV_RISCV_AIA_GRP_APLIC:
                if (copy_from_user(&nr, uaddr, sizeof(nr)))
                        return -EFAULT;

                mutex_lock(&dev->kvm->lock);
                r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
                mutex_unlock(&dev->kvm->lock);
                if (r)
                        return r;

                if (copy_to_user(uaddr, &nr, sizeof(nr)))
                        return -EFAULT;

                break;
        case KVM_DEV_RISCV_AIA_GRP_IMSIC:
                if (copy_from_user(&v, uaddr, sizeof(v)))
                        return -EFAULT;

                mutex_lock(&dev->kvm->lock);
                r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
                mutex_unlock(&dev->kvm->lock);
                if (r)
                        return r;

                if (copy_to_user(uaddr, &v, sizeof(v)))
                        return -EFAULT;

                break;
        }

        return r;
}

static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int nr_vcpus;

        switch (attr->group) {
        case KVM_DEV_RISCV_AIA_GRP_CONFIG:
                switch (attr->attr) {
                case KVM_DEV_RISCV_AIA_CONFIG_MODE:
                case KVM_DEV_RISCV_AIA_CONFIG_IDS:
                case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
                case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
                case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
                case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
                case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
                        return 0;
                }
                break;
        case KVM_DEV_RISCV_AIA_GRP_ADDR:
                nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
                if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
                        return 0;
                else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
                        return 0;
                break;
        case KVM_DEV_RISCV_AIA_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_RISCV_AIA_CTRL_INIT:
                        return 0;
                }
                break;
        case KVM_DEV_RISCV_AIA_GRP_APLIC:
                return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
        case KVM_DEV_RISCV_AIA_GRP_IMSIC:
                return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
        }

        return -ENXIO;
}

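/* Device ops for the "kvm-riscv-aia" in-kernel irqchip device. */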
struct kvm_device_ops kvm_riscv_aia_device_ops = {
        .name = "kvm-riscv-aia",
        .create = aia_create,
        .destroy = aia_destroy,
        .set_attr = aia_set_attr,
        .get_attr = aia_get_attr,
        .has_attr = aia_has_attr,
};

int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
        /* Proceed only if AIA was initialized successfully */
        if (!kvm_riscv_aia_initialized(vcpu->kvm))
                return 1;

        /* Update the IMSIC HW state before entering guest mode */
        return kvm_riscv_vcpu_aia_imsic_update(vcpu);
}

void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
        struct kvm_vcpu_aia_csr *reset_csr =
                        &vcpu->arch.aia_context.guest_reset_csr;

        if (!kvm_riscv_aia_available())
                return;
        memcpy(csr, reset_csr, sizeof(*csr));

        /* Proceed only if AIA was initialized successfully */
        if (!kvm_riscv_aia_initialized(vcpu->kvm))
                return;

        /* Reset the IMSIC context */
        kvm_riscv_vcpu_aia_imsic_reset(vcpu);
}

int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;

        if (!kvm_riscv_aia_available())
                return 0;

        /*
         * We don't do any memory allocations over here because these
         * will be done after the AIA device is initialized by user space.
         *
         * Refer to the aia_init() implementation for more details.
         */

        /* Initialize default values in AIA vcpu context */
        vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
        vaia->hart_index = vcpu->vcpu_idx;

        return 0;
}

void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
        /* Proceed only if AIA was initialized successfully */
        if (!kvm_riscv_aia_initialized(vcpu->kvm))
                return;

        /* Cleanup IMSIC context */
        kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}

int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
                                   u32 guest_index, u32 iid)
{
        unsigned long idx;
        struct kvm_vcpu *vcpu;

        /* Proceed only if AIA was initialized successfully */
        if (!kvm_riscv_aia_initialized(kvm))
                return -EBUSY;

        /* Inject MSI to matching VCPU */
        kvm_for_each_vcpu(idx, vcpu, kvm) {
                if (vcpu->arch.aia_context.hart_index == hart_index)
                        return kvm_riscv_vcpu_aia_imsic_inject(vcpu,
                                                               guest_index,
                                                               0, iid);
        }

        return 0;
}

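/*
 * Inject an MSI by target address: strip the guest index bits from the
 * target PPN, find the VCPU whose IMSIC page matches, and deliver the
 * interrupt ID carried in the MSI data to that VCPU's IMSIC.
 */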
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
        gpa_t tppn, ippn;
        unsigned long idx;
        struct kvm_vcpu *vcpu;
        u32 g, toff, iid = msi->data;
        struct kvm_aia *aia = &kvm->arch.aia;
        gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;

        /* Proceed only if AIA was initialized successfully */
        if (!kvm_riscv_aia_initialized(kvm))
                return -EBUSY;

        /* Convert target address to target PPN */
        tppn = target >> IMSIC_MMIO_PAGE_SHIFT;

        /* Extract and clear Guest ID from target PPN */
        g = tppn & (BIT(aia->nr_guest_bits) - 1);
        tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));

        /* Inject MSI to matching VCPU */
        kvm_for_each_vcpu(idx, vcpu, kvm) {
                ippn = vcpu->arch.aia_context.imsic_addr >>
                       IMSIC_MMIO_PAGE_SHIFT;
                if (ippn == tppn) {
                        toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
                        return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
                                                               toff, iid);
                }
        }

        return 0;
}

int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
{
        /* Proceed only if AIA was initialized successfully */
        if (!kvm_riscv_aia_initialized(kvm))
                return -EBUSY;

        /* Inject interrupt level change in APLIC */
        return kvm_riscv_aia_aplic_inject(kvm, irq, level);
}

void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
        struct kvm_aia *aia = &kvm->arch.aia;

        if (!kvm_riscv_aia_available())
                return;

        /*
         * We don't do any memory allocations over here because these
         * will be done after the AIA device is initialized by user space.
         *
         * Refer to the aia_init() implementation for more details.
         */

        /* Initialize default values in AIA global context */
        aia->mode = (kvm_riscv_aia_nr_hgei) ?
                KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
        aia->nr_ids = kvm_riscv_aia_max_ids - 1;
        aia->nr_sources = 0;
        aia->nr_group_bits = 0;
        aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
        aia->nr_hart_bits = 0;
        aia->nr_guest_bits = 0;
        aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
}

void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
        /* Proceed only if AIA was initialized successfully */
        if (!kvm_riscv_aia_initialized(kvm))
                return;

        /* Cleanup APLIC context */
        kvm_riscv_aia_aplic_cleanup(kvm);
}