189d01306SAnup Patel // SPDX-License-Identifier: GPL-2.0 289d01306SAnup Patel /* 389d01306SAnup Patel * Copyright (C) 2021 Western Digital Corporation or its affiliates. 489d01306SAnup Patel * Copyright (C) 2022 Ventana Micro Systems Inc. 589d01306SAnup Patel * 689d01306SAnup Patel * Authors: 789d01306SAnup Patel * Anup Patel <apatel@ventanamicro.com> 889d01306SAnup Patel */ 989d01306SAnup Patel 1089d01306SAnup Patel #include <linux/bits.h> 1189d01306SAnup Patel #include <linux/kvm_host.h> 1289d01306SAnup Patel #include <linux/uaccess.h> 1389d01306SAnup Patel #include <asm/kvm_aia_imsic.h> 1489d01306SAnup Patel 1589d01306SAnup Patel static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) 1689d01306SAnup Patel { 1789d01306SAnup Patel struct kvm_vcpu *tmp_vcpu; 1889d01306SAnup Patel 1989d01306SAnup Patel for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { 2089d01306SAnup Patel tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); 2189d01306SAnup Patel mutex_unlock(&tmp_vcpu->mutex); 2289d01306SAnup Patel } 2389d01306SAnup Patel } 2489d01306SAnup Patel 2589d01306SAnup Patel static void unlock_all_vcpus(struct kvm *kvm) 2689d01306SAnup Patel { 2789d01306SAnup Patel unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); 2889d01306SAnup Patel } 2989d01306SAnup Patel 3089d01306SAnup Patel static bool lock_all_vcpus(struct kvm *kvm) 3189d01306SAnup Patel { 3289d01306SAnup Patel struct kvm_vcpu *tmp_vcpu; 3389d01306SAnup Patel unsigned long c; 3489d01306SAnup Patel 3589d01306SAnup Patel kvm_for_each_vcpu(c, tmp_vcpu, kvm) { 3689d01306SAnup Patel if (!mutex_trylock(&tmp_vcpu->mutex)) { 3789d01306SAnup Patel unlock_vcpus(kvm, c - 1); 3889d01306SAnup Patel return false; 3989d01306SAnup Patel } 4089d01306SAnup Patel } 4189d01306SAnup Patel 4289d01306SAnup Patel return true; 4389d01306SAnup Patel } 4489d01306SAnup Patel 4589d01306SAnup Patel static int aia_create(struct kvm_device *dev, u32 type) 4689d01306SAnup Patel { 4789d01306SAnup Patel int ret; 4889d01306SAnup Patel unsigned long 
i; 4989d01306SAnup Patel struct kvm *kvm = dev->kvm; 5089d01306SAnup Patel struct kvm_vcpu *vcpu; 5189d01306SAnup Patel 5289d01306SAnup Patel if (irqchip_in_kernel(kvm)) 5389d01306SAnup Patel return -EEXIST; 5489d01306SAnup Patel 5589d01306SAnup Patel ret = -EBUSY; 5689d01306SAnup Patel if (!lock_all_vcpus(kvm)) 5789d01306SAnup Patel return ret; 5889d01306SAnup Patel 5989d01306SAnup Patel kvm_for_each_vcpu(i, vcpu, kvm) { 6089d01306SAnup Patel if (vcpu->arch.ran_atleast_once) 6189d01306SAnup Patel goto out_unlock; 6289d01306SAnup Patel } 6389d01306SAnup Patel ret = 0; 6489d01306SAnup Patel 6589d01306SAnup Patel kvm->arch.aia.in_kernel = true; 6689d01306SAnup Patel 6789d01306SAnup Patel out_unlock: 6889d01306SAnup Patel unlock_all_vcpus(kvm); 6989d01306SAnup Patel return ret; 7089d01306SAnup Patel } 7189d01306SAnup Patel 7289d01306SAnup Patel static void aia_destroy(struct kvm_device *dev) 7389d01306SAnup Patel { 7489d01306SAnup Patel kfree(dev); 7589d01306SAnup Patel } 7689d01306SAnup Patel 7789d01306SAnup Patel static int aia_config(struct kvm *kvm, unsigned long type, 7889d01306SAnup Patel u32 *nr, bool write) 7989d01306SAnup Patel { 8089d01306SAnup Patel struct kvm_aia *aia = &kvm->arch.aia; 8189d01306SAnup Patel 8289d01306SAnup Patel /* Writes can only be done before irqchip is initialized */ 8389d01306SAnup Patel if (write && kvm_riscv_aia_initialized(kvm)) 8489d01306SAnup Patel return -EBUSY; 8589d01306SAnup Patel 8689d01306SAnup Patel switch (type) { 8789d01306SAnup Patel case KVM_DEV_RISCV_AIA_CONFIG_MODE: 8889d01306SAnup Patel if (write) { 8989d01306SAnup Patel switch (*nr) { 9089d01306SAnup Patel case KVM_DEV_RISCV_AIA_MODE_EMUL: 9189d01306SAnup Patel break; 9289d01306SAnup Patel case KVM_DEV_RISCV_AIA_MODE_HWACCEL: 9389d01306SAnup Patel case KVM_DEV_RISCV_AIA_MODE_AUTO: 9489d01306SAnup Patel /* 9589d01306SAnup Patel * HW Acceleration and Auto modes only 9689d01306SAnup Patel * supported on host with non-zero guest 9789d01306SAnup Patel * external 
interrupts (i.e. non-zero 9889d01306SAnup Patel * VS-level IMSIC pages). 9989d01306SAnup Patel */ 10089d01306SAnup Patel if (!kvm_riscv_aia_nr_hgei) 10189d01306SAnup Patel return -EINVAL; 10289d01306SAnup Patel break; 10389d01306SAnup Patel default: 10489d01306SAnup Patel return -EINVAL; 10589d01306SAnup Patel }; 10689d01306SAnup Patel aia->mode = *nr; 10789d01306SAnup Patel } else 10889d01306SAnup Patel *nr = aia->mode; 10989d01306SAnup Patel break; 11089d01306SAnup Patel case KVM_DEV_RISCV_AIA_CONFIG_IDS: 11189d01306SAnup Patel if (write) { 11289d01306SAnup Patel if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) || 11389d01306SAnup Patel (*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) || 11489d01306SAnup Patel ((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) != 11589d01306SAnup Patel KVM_DEV_RISCV_AIA_IDS_MIN) || 11689d01306SAnup Patel (kvm_riscv_aia_max_ids <= *nr)) 11789d01306SAnup Patel return -EINVAL; 11889d01306SAnup Patel aia->nr_ids = *nr; 11989d01306SAnup Patel } else 12089d01306SAnup Patel *nr = aia->nr_ids; 12189d01306SAnup Patel break; 12289d01306SAnup Patel case KVM_DEV_RISCV_AIA_CONFIG_SRCS: 12389d01306SAnup Patel if (write) { 12489d01306SAnup Patel if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) || 12589d01306SAnup Patel (*nr >= kvm_riscv_aia_max_ids)) 12689d01306SAnup Patel return -EINVAL; 12789d01306SAnup Patel aia->nr_sources = *nr; 12889d01306SAnup Patel } else 12989d01306SAnup Patel *nr = aia->nr_sources; 13089d01306SAnup Patel break; 13189d01306SAnup Patel case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS: 13289d01306SAnup Patel if (write) { 13389d01306SAnup Patel if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX) 13489d01306SAnup Patel return -EINVAL; 13589d01306SAnup Patel aia->nr_group_bits = *nr; 13689d01306SAnup Patel } else 13789d01306SAnup Patel *nr = aia->nr_group_bits; 13889d01306SAnup Patel break; 13989d01306SAnup Patel case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT: 14089d01306SAnup Patel if (write) { 14189d01306SAnup Patel if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) || 14289d01306SAnup Patel 
(*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX)) 14389d01306SAnup Patel return -EINVAL; 14489d01306SAnup Patel aia->nr_group_shift = *nr; 14589d01306SAnup Patel } else 14689d01306SAnup Patel *nr = aia->nr_group_shift; 14789d01306SAnup Patel break; 14889d01306SAnup Patel case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS: 14989d01306SAnup Patel if (write) { 15089d01306SAnup Patel if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX) 15189d01306SAnup Patel return -EINVAL; 15289d01306SAnup Patel aia->nr_hart_bits = *nr; 15389d01306SAnup Patel } else 15489d01306SAnup Patel *nr = aia->nr_hart_bits; 15589d01306SAnup Patel break; 15689d01306SAnup Patel case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS: 15789d01306SAnup Patel if (write) { 15889d01306SAnup Patel if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX) 15989d01306SAnup Patel return -EINVAL; 16089d01306SAnup Patel aia->nr_guest_bits = *nr; 16189d01306SAnup Patel } else 16289d01306SAnup Patel *nr = aia->nr_guest_bits; 16389d01306SAnup Patel break; 16489d01306SAnup Patel default: 16589d01306SAnup Patel return -ENXIO; 16689d01306SAnup Patel }; 16789d01306SAnup Patel 16889d01306SAnup Patel return 0; 16989d01306SAnup Patel } 17089d01306SAnup Patel 17189d01306SAnup Patel static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write) 17289d01306SAnup Patel { 17389d01306SAnup Patel struct kvm_aia *aia = &kvm->arch.aia; 17489d01306SAnup Patel 17589d01306SAnup Patel if (write) { 17689d01306SAnup Patel /* Writes can only be done before irqchip is initialized */ 17789d01306SAnup Patel if (kvm_riscv_aia_initialized(kvm)) 17889d01306SAnup Patel return -EBUSY; 17989d01306SAnup Patel 18089d01306SAnup Patel if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1)) 18189d01306SAnup Patel return -EINVAL; 18289d01306SAnup Patel 18389d01306SAnup Patel aia->aplic_addr = *addr; 18489d01306SAnup Patel } else 18589d01306SAnup Patel *addr = aia->aplic_addr; 18689d01306SAnup Patel 18789d01306SAnup Patel return 0; 18889d01306SAnup Patel } 18989d01306SAnup Patel 19089d01306SAnup Patel static 
int aia_imsic_addr(struct kvm *kvm, u64 *addr, 19189d01306SAnup Patel unsigned long vcpu_idx, bool write) 19289d01306SAnup Patel { 19389d01306SAnup Patel struct kvm_vcpu *vcpu; 19489d01306SAnup Patel struct kvm_vcpu_aia *vcpu_aia; 19589d01306SAnup Patel 19689d01306SAnup Patel vcpu = kvm_get_vcpu(kvm, vcpu_idx); 19789d01306SAnup Patel if (!vcpu) 19889d01306SAnup Patel return -EINVAL; 19989d01306SAnup Patel vcpu_aia = &vcpu->arch.aia_context; 20089d01306SAnup Patel 20189d01306SAnup Patel if (write) { 20289d01306SAnup Patel /* Writes can only be done before irqchip is initialized */ 20389d01306SAnup Patel if (kvm_riscv_aia_initialized(kvm)) 20489d01306SAnup Patel return -EBUSY; 20589d01306SAnup Patel 20689d01306SAnup Patel if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1)) 20789d01306SAnup Patel return -EINVAL; 20889d01306SAnup Patel } 20989d01306SAnup Patel 21089d01306SAnup Patel mutex_lock(&vcpu->mutex); 21189d01306SAnup Patel if (write) 21289d01306SAnup Patel vcpu_aia->imsic_addr = *addr; 21389d01306SAnup Patel else 21489d01306SAnup Patel *addr = vcpu_aia->imsic_addr; 21589d01306SAnup Patel mutex_unlock(&vcpu->mutex); 21689d01306SAnup Patel 21789d01306SAnup Patel return 0; 21889d01306SAnup Patel } 21989d01306SAnup Patel 22089d01306SAnup Patel static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr) 22189d01306SAnup Patel { 22289d01306SAnup Patel u32 h, l; 22389d01306SAnup Patel gpa_t mask = 0; 22489d01306SAnup Patel 22589d01306SAnup Patel h = aia->nr_hart_bits + aia->nr_guest_bits + 22689d01306SAnup Patel IMSIC_MMIO_PAGE_SHIFT - 1; 22789d01306SAnup Patel mask = GENMASK_ULL(h, 0); 22889d01306SAnup Patel 22989d01306SAnup Patel if (aia->nr_group_bits) { 23089d01306SAnup Patel h = aia->nr_group_bits + aia->nr_group_shift - 1; 23189d01306SAnup Patel l = aia->nr_group_shift; 23289d01306SAnup Patel mask |= GENMASK_ULL(h, l); 23389d01306SAnup Patel } 23489d01306SAnup Patel 23589d01306SAnup Patel return (addr & ~mask) >> IMSIC_MMIO_PAGE_SHIFT; 23689d01306SAnup Patel } 

/*
 * Recover a VCPU's hart index from its IMSIC address: the hart bits
 * sit above the guest-index page bits, and the optional group bits
 * are folded in above the hart bits.
 */
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
	u32 hart, group = 0;

	hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
		GENMASK_ULL(aia->nr_hart_bits - 1, 0);
	if (aia->nr_group_bits)
		group = (addr >> aia->nr_group_shift) &
			GENMASK_ULL(aia->nr_group_bits - 1, 0);

	return (group << aia->nr_hart_bits) | hart;
}

/*
 * One-shot irqchip initialization (KVM_DEV_RISCV_AIA_CTRL_INIT).
 *
 * Validates the configured geometry, initializes the APLIC, then
 * initializes each VCPU's IMSIC after checking that every IMSIC
 * address was set and that all IMSICs share the same base PPN.
 * On failure, IMSICs initialized so far and the APLIC are cleaned
 * up before returning the error.
 */
static int aia_init(struct kvm *kvm)
{
	int ret, i;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vaia;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;

	/* Irqchip can be initialized only once */
	if (kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* We might be in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* Number of sources should be less than or equals number of IDs */
	if (aia->nr_ids < aia->nr_sources)
		return -EINVAL;

	/* APLIC base is required for non-zero number of sources */
	if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
		return -EINVAL;

	/* Initialize APLIC */
	ret = kvm_riscv_aia_aplic_init(kvm);
	if (ret)
		return ret;

	/* Iterate over each VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		vaia = &vcpu->arch.aia_context;

		/* IMSIC base is required */
		if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* All IMSICs should have matching base PPN */
		if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
			base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
		if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* Update HART index of the IMSIC based on IMSIC base */
		vaia->hart_index = aia_imsic_hart_index(aia,
							vaia->imsic_addr);

		/* Initialize IMSIC for this VCPU */
		ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
		if (ret)
			goto fail_cleanup_imsics;
	}

	/* Set the initialized flag */
	kvm->arch.aia.initialized = true;

	return 0;

fail_cleanup_imsics:
	/* Undo IMSIC init for VCPUs [0, idx - 1]; idx itself failed. */
	for (i = idx - 1; i >= 0; i--) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
	}
	kvm_riscv_aia_aplic_cleanup(kvm);
	return ret;
}

/*
 * KVM_SET_DEVICE_ATTR handler. Dispatches on attr->group; the
 * user-space payload size depends on the group: u32 for CONFIG
 * and APLIC, u64 for ADDR, unsigned long for IMSIC. All state
 * changes are serialized under kvm->lock. Returns -ENXIO for an
 * unhandled group/attribute.
 */
static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	unsigned long v, type = (unsigned long)attr->attr;
	void __user *uaddr = (void __user *)(long)attr->addr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, true);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			/* IMSIC attrs are indexed by VCPU after the APLIC one */
			r = aia_imsic_addr(dev->kvm, &addr,
			       type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (type) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = aia_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			break;
		}

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
		mutex_unlock(&dev->kvm->lock);

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
		mutex_unlock(&dev->kvm->lock);

		break;
	}

	return r;
}

/*
 * KVM_GET_DEVICE_ATTR handler. Mirrors aia_set_attr(): reads the
 * current value for the requested group/attribute under kvm->lock
 * and copies it back to user space. Returns -ENXIO for an unhandled
 * group/attribute and -EFAULT on user-copy failure.
 */
static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	void __user *uaddr = (void __user *)(long)attr->addr;
	unsigned long v, type = (unsigned long)attr->attr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, false);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			       type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &v, sizeof(v)))
			return -EFAULT;

		break;
	}

	return r;
}

/*
 * KVM_HAS_DEVICE_ATTR handler: report (0) whether an attribute is
 * supported, without touching any state; -ENXIO otherwise.
 */
static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int nr_vcpus;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			return 0;
		else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			return 0;
		break;
	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
	}

	return -ENXIO;
}

/* Ops table registered for the KVM "kvm-riscv-aia" device type. */
struct kvm_device_ops kvm_riscv_aia_device_ops = {
	.name = "kvm-riscv-aia",
	.create = aia_create,
	.destroy = aia_destroy,
	.set_attr = aia_set_attr,
	.get_attr = aia_get_attr,
	.has_attr = aia_has_attr,
};

/*
 * Per-VCPU hook called before guest entry. Returns 1 ("nothing to
 * do, proceed") when AIA is not initialized; otherwise forwards to
 * the IMSIC update path.
 */
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return 1;

	/* Update the IMSIC HW state before entering guest mode */
	return kvm_riscv_vcpu_aia_imsic_update(vcpu);
}

/*
 * Reset a VCPU's AIA state: restore the guest AIA CSRs from their
 * reset snapshot, then reset the IMSIC context if AIA is initialized.
 */
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	struct kvm_vcpu_aia_csr *reset_csr =
				&vcpu->arch.aia_context.guest_reset_csr;

	if (!kvm_riscv_aia_available())
		return;
	memcpy(csr, reset_csr, sizeof(*csr));

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Reset the IMSIC context */
	kvm_riscv_vcpu_aia_imsic_reset(vcpu);
}

/*
 * VCPU creation hook: seed the per-VCPU AIA context with defaults
 * (no IMSIC address yet; hart index defaults to the VCPU index and
 * may be recomputed later in aia_init()). No allocations here.
 */
int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;

	if (!kvm_riscv_aia_available())
		return 0;

	/*
	 * We don't do any memory allocations over here because these
	 * will be done after AIA device is initialized by the user-space.
	 *
	 * Refer, aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA vcpu context */
	vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
	vaia->hart_index = vcpu->vcpu_idx;

	return 0;
}

/* VCPU teardown hook: release the VCPU's IMSIC context, if any. */
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Cleanup IMSIC context */
	kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}

/*
 * Inject an MSI identified by (hart_index, guest_index, iid) into
 * the matching VCPU's IMSIC. Silently returns 0 if no VCPU matches;
 * -EBUSY if AIA is not initialized.
 */
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid)
{
	unsigned long idx;
	struct kvm_vcpu *vcpu;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		if (vcpu->arch.aia_context.hart_index == hart_index)
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu,
							       guest_index,
							       0, iid);
	}

	return 0;
}

/*
 * Inject an MSI described by a kvm_msi routing entry: decode the
 * 64-bit target address into a page number and guest index, find
 * the VCPU whose IMSIC page matches, and deliver msi->data as the
 * interrupt ID. Returns 0 if no VCPU matches; -EBUSY if AIA is not
 * initialized.
 */
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	gpa_t tppn, ippn;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	u32 g, toff, iid = msi->data;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Convert target address to target PPN */
	tppn = target >> IMSIC_MMIO_PAGE_SHIFT;

	/* Extract and clear Guest ID from target PPN */
	g = tppn & (BIT(aia->nr_guest_bits) - 1);
	tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		ippn = vcpu->arch.aia_context.imsic_addr >>
					IMSIC_MMIO_PAGE_SHIFT;
		if (ippn == tppn) {
			toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
							       toff, iid);
		}
	}

	return 0;
}

/*
 * Forward a wired-interrupt level change to the in-kernel APLIC.
 * -EBUSY if AIA is not initialized.
 */
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject interrupt level change in APLIC */
	return kvm_riscv_aia_aplic_inject(kvm, irq, level);
}

/*
 * VM creation hook: populate the global AIA context with defaults.
 * Mode defaults to AUTO when the host has guest external interrupt
 * files, else pure emulation. No allocations here.
 */
void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (!kvm_riscv_aia_available())
		return;

	/*
	 * We don't do any memory allocations over here because these
	 * will be done after AIA device is initialized by the user-space.
	 *
	 * Refer, aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA global context */
	aia->mode = (kvm_riscv_aia_nr_hgei) ?
		KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
	aia->nr_ids = kvm_riscv_aia_max_ids - 1;
	aia->nr_sources = 0;
	aia->nr_group_bits = 0;
	aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
	aia->nr_hart_bits = 0;
	aia->nr_guest_bits = 0;
	aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
}

/* VM teardown hook: release APLIC state if the irqchip was initialized. */
void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return;

	/* Cleanup APLIC context */
	kvm_riscv_aia_aplic_cleanup(kvm);
}