// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>

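/*
 * VM-wide statistics exported through the KVM binary stats interface
 * (KVM_GET_STATS_FD). RISC-V carries only the generic VM stats; the
 * header below describes the layout of the stats file: the id string
 * sits right after the header, followed by the descriptors, followed
 * by the data values themselves.
 */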
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

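/*
 * Arch hook for VM creation: the g-stage (guest-stage) page table and
 * VMID must come up first, then the in-kernel AIA state and the guest
 * timer are initialized on top of them.
 */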
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int r;

	r = kvm_riscv_gstage_alloc_pgd(kvm);
	if (r)
		return r;

	r = kvm_riscv_gstage_vmid_init(kvm);
	if (r) {
		kvm_riscv_gstage_free_pgd(kvm);
		return r;
	}

	kvm_riscv_aia_init_vm(kvm);

	kvm_riscv_guest_timer_init(kvm);

	return 0;
}

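/*
 * Arch hook for VM teardown: destroy all VCPUs before releasing the
 * VM-wide AIA state they may still reference.
 */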
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);

	kvm_riscv_aia_destroy_vm(kvm);
}

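/*
 * Backend of the KVM_IRQ_LINE ioctl: inject (or retract) a wired
 * interrupt through the in-kernel AIA. As a rough sketch, a userspace
 * VMM would raise line 3 with something like:
 *
 *	struct kvm_irq_level irql = { .irq = 3, .level = 1 };
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irql);
 *
 * This fails with -ENXIO when the VM has no in-kernel irqchip.
 */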
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	return kvm_riscv_aia_inject_irq(kvm, irql->irq, irql->level);
}

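/*
 * Inject an MSI described by a routing entry. MSIs are edge events,
 * so a zero level carries no interrupt and is rejected.
 */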
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	struct kvm_msi msi;

	if (!level)
		return -1;

	msi.address_lo = e->msi.address_lo;
	msi.address_hi = e->msi.address_hi;
	msi.data = e->msi.data;
	msi.flags = e->msi.flags;
	msi.devid = e->msi.devid;

	return kvm_riscv_aia_inject_msi(kvm, &msi);
}

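/* Inject a wired-irqchip routing entry through the in-kernel AIA. */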
static int kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry *e,
			     struct kvm *kvm, int irq_source_id,
			     int level, bool line_status)
{
	return kvm_riscv_aia_inject_irq(kvm, e->irqchip.pin, level);
}

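/*
 * Populate an identity GSI-to-pin routing table on irqchip 0 so that
 * all input lines are usable before userspace installs its own routing.
 */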
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines)
{
	struct kvm_irq_routing_entry *ents;
	int i, rc;

	ents = kcalloc(lines, sizeof(*ents), GFP_KERNEL);
	if (!ents)
		return -ENOMEM;

	for (i = 0; i < lines; i++) {
		ents[i].gsi = i;
		ents[i].type = KVM_IRQ_ROUTING_IRQCHIP;
		ents[i].u.irqchip.irqchip = 0;
		ents[i].u.irqchip.pin = i;
	}
	rc = kvm_set_irq_routing(kvm, ents, lines, 0);
	kfree(ents);

	return rc;
}

bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

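/*
 * Translate one userspace routing entry (from KVM_SET_GSI_ROUTING)
 * into its in-kernel form, validating irqchip and pin numbers.
 */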
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		e->set = kvm_riscv_set_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin;
		if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
		    (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
			goto out;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		e->msi.flags = ue->flags;
		e->msi.devid = ue->u.msi.devid;
		break;
	default:
		goto out;
	}
	r = 0;
out:
	return r;
}

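/*
 * Injection entry point for atomic context (presumably an irqfd
 * wakeup): MSI and wired AIA injection are safe here, while a zero
 * level or an unknown routing type returns -EWOULDBLOCK so the caller
 * retries from process context.
 */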
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	if (!level)
		return -EWOULDBLOCK;

	switch (e->type) {
	case KVM_IRQ_ROUTING_MSI:
		return kvm_set_msi(e, kvm, irq_source_id, level, line_status);

	case KVM_IRQ_ROUTING_IRQCHIP:
		return kvm_riscv_set_irq(e, kvm, irq_source_id,
					 level, line_status);
	}

	return -EWOULDBLOCK;
}

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

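/*
 * Backend of KVM_CHECK_EXTENSION: report which capabilities this
 * architecture supports and, where relevant, their limits.
 */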
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = kvm_riscv_aia_available();
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_VM_GPA_BITS:
		r = kvm_riscv_gstage_gpa_bits();
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

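/* There are no RISC-V specific VM ioctls at this point. */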
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}