// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia_imsic.h>

struct aia_hgei_control {
	raw_spinlock_t lock;
	unsigned long free_bitmap;
	struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;

unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);

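/*
 * Find the guest external interrupt (HGEI) line on the current host CPU
 * that is owned by @owner. Returns the line number in the range
 * 1..kvm_riscv_aia_nr_hgei, or -1 if @owner holds no line on this CPU.
 */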
static int aia_find_hgei(struct kvm_vcpu *owner)
{
	int i, hgei;
	unsigned long flags;
	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	hgei = -1;
	for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
		if (hgctrl->owners[i] == owner) {
			hgei = i;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
	return hgei;
}

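/*
 * Program HVICTL for the guest: IID is fixed to IRQ_S_EXT (9) and the
 * low IPRIO bit is set to 1 or 0 depending on whether a supervisor
 * external interrupt is pending for the guest.
 */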
static void aia_set_hvictl(bool ext_irq_pending)
{
	unsigned long hvictl;

	/*
	 * HVICTL.IID == 9 and HVICTL.IPRIO == 0 represents
	 * no interrupt in HVICTL.
	 */

	hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
	hvictl |= ext_irq_pending;
	csr_write(CSR_HVICTL, hvictl);
}

#ifdef CONFIG_32BIT
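/*
 * On 32-bit hosts, flush the upper 32 bits of the software-maintained
 * pending interrupt state into the shadow HVIPH value before entering
 * the guest.
 */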
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long mask, val;

	if (!kvm_riscv_aia_available())
		return;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

		csr->hviph &= ~mask;
		csr->hviph |= val;
	}
}

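/*
 * On 32-bit hosts, capture the upper half of the VS-level interrupt
 * enable state (VSIEH) after running the guest.
 */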
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (kvm_riscv_aia_available())
		csr->vsieh = csr_read(CSR_VSIEH);
}
#endif

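/*
 * Check whether the VCPU has unmasked AIA interrupts pending. Besides
 * the software-maintained pending bits, this also checks the HGEIP CSR
 * for the guest external interrupt line owned by the VCPU.
 */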
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	int hgei;
	unsigned long seip;

	if (!kvm_riscv_aia_available())
		return false;

#ifdef CONFIG_32BIT
	if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
	    (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
		return true;
#endif

	seip = vcpu->arch.guest_csr.vsie;
	seip &= (unsigned long)mask;
	seip &= BIT(IRQ_S_EXT);

	if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
		return false;

	hgei = aia_find_hgei(vcpu);
	if (hgei > 0)
		return !!(csr_read(CSR_HGEIP) & BIT(hgei));

	return false;
}

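/*
 * Propagate the software-maintained pending state into HVIPH (32-bit
 * only) and HVICTL before entering the guest.
 */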
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
	aia_set_hvictl(!!(csr->hvip & BIT(IRQ_VS_EXT)));
}

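/* Restore the VCPU's AIA CSR state on the host CPU it is being loaded on. */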
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

	csr_write(CSR_VSISELECT, csr->vsiselect);
	csr_write(CSR_HVIPRIO1, csr->hviprio1);
	csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
	csr_write(CSR_VSIEH, csr->vsieh);
	csr_write(CSR_HVIPH, csr->hviph);
	csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
	csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
}

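/* Save the VCPU's AIA CSR state when it is scheduled out. */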
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

	csr->vsiselect = csr_read(CSR_VSISELECT);
	csr->hviprio1 = csr_read(CSR_HVIPRIO1);
	csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
	csr->vsieh = csr_read(CSR_VSIEH);
	csr->hviph = csr_read(CSR_HVIPH);
	csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
	csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
}

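/*
 * Read one AIA CSR from the VCPU context (ONE_REG style accessor).
 * When AIA is unavailable, the register reads back as zero.
 */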
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -ENOENT;

	*out_val = 0;
	if (kvm_riscv_aia_available())
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

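/*
 * Write one AIA CSR in the VCPU context (ONE_REG style accessor).
 * Writing siph on 32-bit hosts discards any stale pending-mask update.
 */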
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (kvm_riscv_aia_available()) {
		((unsigned long *)csr)[reg_num] = val;

#ifdef CONFIG_32BIT
		if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
			WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
	}

	return 0;
}

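/*
 * Emulate a read-modify-write of the guest's topei CSR by forwarding
 * it to the in-kernel IMSIC virtualization once AIA is initialized.
 */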
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask)
{
	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* If AIA not initialized then forward to user space */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return KVM_INSN_EXIT_TO_USER_SPACE;

	return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
					    val, new_val, wr_mask);
}

/*
 * External IRQ priority is always read-only zero. This means the
 * default priority order is always preferred for external IRQs unless
 * HVICTL.IID == 9 and HVICTL.IPRIO != 0.
 */
static int aia_irq2bitpos[] = {
	0,   8,   -1,  -1,  16,  24,  -1,  -1,  /* 0 - 7 */
	32,  -1,  -1,  -1,  -1,  40,  48,  56,  /* 8 - 15 */
	64,  72,  80,  88,  96,  104, 112, 120, /* 16 - 23 */
	-1,  -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 24 - 31 */
	-1,  -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 32 - 39 */
	-1,  -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 40 - 47 */
	-1,  -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 48 - 55 */
	-1,  -1,  -1,  -1,  -1,  -1,  -1,  -1,  /* 56 - 63 */
};

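/*
 * Read the 8-bit priority of @irq from the HVIPRIO* CSR that hosts it,
 * using aia_irq2bitpos[] to locate the bit position of the field.
 */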
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return 0;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = csr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = csr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = csr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return 0;
	}

	return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}

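/*
 * Write the 8-bit priority of @irq into the HVIPRIO* CSR that hosts it
 * via a read-modify-write of the whole register.
 */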
static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = csr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = csr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = csr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return;
	}

	hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
	hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		csr_write(CSR_HVIPRIO1, hviprio);
		break;
	case 1:
#ifndef CONFIG_32BIT
		csr_write(CSR_HVIPRIO2, hviprio);
		break;
#else
		csr_write(CSR_HVIPRIO1H, hviprio);
		break;
	case 2:
		csr_write(CSR_HVIPRIO2, hviprio);
		break;
	case 3:
		csr_write(CSR_HVIPRIO2H, hviprio);
		break;
#endif
	default:
		return;
	}
}

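/*
 * Emulate a read-modify-write of one iprio array register, which packs
 * four priority bytes per 32 bits of XLEN (so 4 IRQs on 32-bit hosts,
 * 8 on 64-bit hosts). Odd register indexes do not exist on 64-bit.
 */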
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
			 unsigned long *val, unsigned long new_val,
			 unsigned long wr_mask)
{
	int i, first_irq, nirqs;
	unsigned long old_val;
	u8 prio;

#ifndef CONFIG_32BIT
	if (isel & 0x1)
		return KVM_INSN_ILLEGAL_TRAP;
#endif

	nirqs = 4 * (BITS_PER_LONG / 32);
	first_irq = (isel - ISELECT_IPRIO0) * 4;

	old_val = 0;
	for (i = 0; i < nirqs; i++) {
		prio = aia_get_iprio8(vcpu, first_irq + i);
		old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
	}

	if (val)
		*val = old_val;

	if (wr_mask) {
		new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
		for (i = 0; i < nirqs; i++) {
			prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
				TOPI_IPRIO_MASK;
			aia_set_iprio8(vcpu, first_irq + i, prio);
		}
	}

	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

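/*
 * Emulate a read-modify-write of an indirectly accessed register
 * selected by VSISELECT: iprio array registers are handled here,
 * IMSIC registers are forwarded to the IMSIC virtualization code,
 * and everything else is redirected to user space.
 */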
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask)
{
	unsigned int isel;

	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* First try to emulate in kernel space */
	isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
		 kvm_riscv_aia_initialized(vcpu->kvm))
		return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
						    wr_mask);

	/* We can't handle it here so redirect to user space */
	return KVM_INSN_EXIT_TO_USER_SPACE;
}

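/*
 * Allocate a free guest external interrupt line on @cpu for @owner.
 * Returns the allocated line number, -ENOENT if no line is free, or
 * -ENODEV when AIA is not available.
 */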
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa)
{
	int ret = -ENOENT;
	unsigned long flags;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return -ENODEV;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgctrl->free_bitmap) {
		ret = __ffs(hgctrl->free_bitmap);
		hgctrl->free_bitmap &= ~BIT(ret);
		hgctrl->owners[ret] = owner;
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	/* TODO: To be updated later by AIA IMSIC HW guest file support */
	if (hgei_va)
		*hgei_va = NULL;
	if (hgei_pa)
		*hgei_pa = 0;

	return ret;
}

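/* Return a guest external interrupt line on @cpu to the free pool. */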
void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
	unsigned long flags;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
		if (!(hgctrl->free_bitmap & BIT(hgei))) {
			hgctrl->free_bitmap |= BIT(hgei);
			hgctrl->owners[hgei] = NULL;
		}
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}

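/*
 * Enable or disable wakeup on @owner's guest external interrupt line
 * by setting or clearing its bit in HGEIE on the current host CPU.
 */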
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
{
	int hgei;

	if (!kvm_riscv_aia_available())
		return;

	hgei = aia_find_hgei(owner);
	if (hgei > 0) {
		if (enable)
			csr_set(CSR_HGEIE, BIT(hgei));
		else
			csr_clear(CSR_HGEIE, BIT(hgei));
	}
}

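/*
 * Per-CPU SGEI handler: mask the pending lines in HGEIE and kick the
 * owning VCPUs so they notice the pending guest external interrupt.
 */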
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
	int i;
	unsigned long hgei_mask, flags;
	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

	hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
	csr_clear(CSR_HGEIE, hgei_mask);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
		if (hgctrl->owners[i])
			kvm_vcpu_kick(hgctrl->owners[i]);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
	return IRQ_HANDLED;
}

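/*
 * Set up per-CPU HGEI line bitmaps (line 0 is reserved) and request
 * the per-CPU SGEI interrupt from the INTC irq domain.
 */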
static int aia_hgei_init(void)
{
	int cpu, rc;
	struct irq_domain *domain;
	struct aia_hgei_control *hgctrl;

	/* Initialize per-CPU guest external interrupt line management */
	for_each_possible_cpu(cpu) {
		hgctrl = per_cpu_ptr(&aia_hgei, cpu);
		raw_spin_lock_init(&hgctrl->lock);
		if (kvm_riscv_aia_nr_hgei) {
			hgctrl->free_bitmap =
				BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
			hgctrl->free_bitmap &= ~BIT(0);
		} else
			hgctrl->free_bitmap = 0;
	}

	/* Find INTC irq domain */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		kvm_err("unable to find INTC domain\n");
		return -ENOENT;
	}

	/* Map per-CPU SGEI interrupt from INTC domain */
	hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
	if (!hgei_parent_irq) {
		kvm_err("unable to map SGEI IRQ\n");
		return -ENOMEM;
	}

	/* Request per-CPU SGEI interrupt */
	rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
				"riscv-kvm", &aia_hgei);
	if (rc) {
		kvm_err("failed to request SGEI IRQ\n");
		return rc;
	}

	return 0;
}

static void aia_hgei_exit(void)
{
	/* Free per-CPU SGEI interrupt */
	free_percpu_irq(hgei_parent_irq, &aia_hgei);
}

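/*
 * Enable AIA support on the current host CPU: reset the HVICTL and
 * HVIPRIO* CSRs and enable the per-CPU SGEI interrupt.
 */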
void kvm_riscv_aia_enable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	aia_set_hvictl(false);
	csr_write(CSR_HVIPRIO1, 0x0);
	csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, 0x0);
	csr_write(CSR_HIDELEGH, 0x0);
	csr_write(CSR_HVIPRIO1H, 0x0);
	csr_write(CSR_HVIPRIO2H, 0x0);
#endif

	/* Enable per-CPU SGEI interrupt */
	enable_percpu_irq(hgei_parent_irq,
			  irq_get_trigger_type(hgei_parent_irq));
	csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
}

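/*
 * Disable AIA support on the current host CPU: mask SGEI, release the
 * IMSIC state attached to local HGEI lines, and kick the owning VCPUs
 * so they can resume on other host CPUs.
 */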
void kvm_riscv_aia_disable(void)
{
	int i;
	unsigned long flags;
	struct kvm_vcpu *vcpu;
	struct aia_hgei_control *hgctrl;

	if (!kvm_riscv_aia_available())
		return;
	hgctrl = get_cpu_ptr(&aia_hgei);

	/* Disable per-CPU SGEI interrupt */
	csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
	disable_percpu_irq(hgei_parent_irq);

	aia_set_hvictl(false);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
		vcpu = hgctrl->owners[i];
		if (!vcpu)
			continue;

		/*
		 * We release hgctrl->lock before notifying IMSIC
		 * so that we don't have lock ordering issues.
		 */
		raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

		/* Notify IMSIC */
		kvm_riscv_vcpu_aia_imsic_release(vcpu);

		/*
		 * Wake up the VCPU if it was blocked so that it
		 * can run on other HARTs.
		 */
		if (csr_read(CSR_HGEIE) & BIT(i)) {
			csr_clear(CSR_HGEIE, BIT(i));
			kvm_vcpu_kick(vcpu);
		}

		raw_spin_lock_irqsave(&hgctrl->lock, flags);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
}

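/*
 * One-time initialization: probe the number of HGEI lines, set up
 * per-CPU SGEI handling, register the AIA device ops, and flip the
 * static key that marks AIA as available.
 */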
int kvm_riscv_aia_init(void)
{
	int rc;

	if (!riscv_isa_extension_available(NULL, SxAIA))
		return -ENODEV;

	/* Figure out number of bits in HGEIE */
	csr_write(CSR_HGEIE, -1UL);
	kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
	csr_write(CSR_HGEIE, 0);
	if (kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_nr_hgei--;

	/*
	 * Number of usable HGEI lines should be the minimum of per-HART
	 * IMSIC guest files and number of bits in HGEIE.
	 *
	 * TODO: To be updated later by AIA IMSIC HW guest file support
	 */
	kvm_riscv_aia_nr_hgei = 0;

	/*
	 * Find number of guest MSI IDs
	 *
	 * TODO: To be updated later by AIA IMSIC HW guest file support
	 */
	kvm_riscv_aia_max_ids = IMSIC_MAX_ID;

	/* Initialize guest external interrupt line management */
	rc = aia_hgei_init();
	if (rc)
		return rc;

	/* Register device operations */
	rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
				     KVM_DEV_TYPE_RISCV_AIA);
	if (rc) {
		aia_hgei_exit();
		return rc;
	}

	/* Enable KVM AIA support */
	static_branch_enable(&kvm_riscv_aia_available);

	return 0;
}

void kvm_riscv_aia_exit(void)
{
	if (!kvm_riscv_aia_available())
		return;

	/* Unregister device operations */
	kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);

	/* Cleanup the HGEI state */
	aia_hgei_exit();
}