/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#ifndef __KVM_RISCV_AIA_H
#define __KVM_RISCV_AIA_H
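
/*
 * KVM interfaces for the RISC-V Advanced Interrupt Architecture (AIA):
 * per-VM and per-VCPU state for the in-kernel IMSIC and APLIC emulation,
 * plus the AIA CSR accessors used by KVM.
 */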
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <asm/csr.h>

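/*
 * Per-VM AIA state: irqchip life-cycle flags plus the guest IMSIC
 * address layout (group/hart/guest bits) and the emulated APLIC.
 */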
struct kvm_aia {
	/* In-kernel irqchip created */
	bool		in_kernel;

	/* In-kernel irqchip initialized */
	bool		initialized;

	/* Virtualization mode (Emulation, HW Accelerated, or Auto) */
	u32		mode;

	/* Number of MSIs */
	u32		nr_ids;

	/* Number of wired IRQs */
	u32		nr_sources;

	/* Number of group bits in IMSIC address */
	u32		nr_group_bits;

	/* Position of group bits in IMSIC address */
	u32		nr_group_shift;

	/* Number of hart bits in IMSIC address */
	u32		nr_hart_bits;

	/* Number of guest bits in IMSIC address */
	u32		nr_guest_bits;

	/* Guest physical address of APLIC */
	gpa_t		aplic_addr;

	/* Internal state of APLIC */
	void		*aplic_state;
};

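/*
 * AIA CSR context saved/restored for each guest VCPU; the *h entries
 * are the high-half CSRs that only exist on RV32.
 */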
struct kvm_vcpu_aia_csr {
	unsigned long vsiselect;
	unsigned long hviprio1;
	unsigned long hviprio2;
	unsigned long vsieh;
	unsigned long hviph;
	unsigned long hviprio1h;
	unsigned long hviprio2h;
};

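/*
 * Per-VCPU AIA state: CSR contexts plus the location and software
 * state of this VCPU's emulated IMSIC.
 */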
struct kvm_vcpu_aia {
	/* CPU AIA CSR context of Guest VCPU */
	struct kvm_vcpu_aia_csr guest_csr;

	/* CPU AIA CSR context upon Guest VCPU reset */
	struct kvm_vcpu_aia_csr guest_reset_csr;

	/* Guest physical address of IMSIC for this VCPU */
	gpa_t		imsic_addr;

	/* HART index of IMSIC extracted from guest physical address */
	u32		hart_index;

	/* Internal state of IMSIC for this VCPU */
	void		*imsic_state;
};

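/* Marker for a not-yet-configured APLIC/IMSIC guest physical address */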
#define KVM_RISCV_AIA_UNDEF_ADDR	(-1)

#define kvm_riscv_aia_initialized(k)	((k)->arch.aia.initialized)

#define irqchip_in_kernel(k)		((k)->arch.aia.in_kernel)

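/*
 * Global AIA capability reported by the host: number of guest external
 * interrupts (HGEI) and MSI identities, plus a static branch so that
 * the no-AIA case stays cheap on hot paths, e.g. (illustrative):
 *
 *	if (!kvm_riscv_aia_available())
 *		return;
 */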
extern unsigned int kvm_riscv_aia_nr_hgei;
extern unsigned int kvm_riscv_aia_max_ids;
DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
#define kvm_riscv_aia_available() \
	static_branch_unlikely(&kvm_riscv_aia_available)

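/* KVM device ops backing the userspace-created in-kernel AIA irqchip */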
extern struct kvm_device_ops kvm_riscv_aia_device_ops;

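/* In-kernel IMSIC (Incoming MSI Controller) emulation for guest VCPUs */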
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);

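/*
 * TOPEI is mapped past the last valid iselect value so that *topei
 * accesses can be multiplexed through the same rmw interface as the
 * indirectly accessed IMSIC registers.
 */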
#define KVM_RISCV_AIA_IMSIC_TOPEI	(ISELECT_MASK + 1)
int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
				 unsigned long *val, unsigned long new_val,
				 unsigned long wr_mask);
int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
				bool write, unsigned long *val);
int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type);
void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
				    u32 guest_index, u32 offset, u32 iid);
int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu);

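/* In-kernel APLIC (Advanced Platform-Level Interrupt Controller) emulation */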
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v);
int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v);
int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type);
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level);
int kvm_riscv_aia_aplic_init(struct kvm *kvm);
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm);

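/*
 * On RV32 the 64-bit guest interrupt state spans the *h high-half CSRs,
 * so pending bits must be explicitly flushed to and synced from hviph;
 * on RV64 these helpers compile away.
 */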
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
#else
static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
}
#endif
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);

void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val);
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val);

int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask);
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);
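
/*
 * Entries for KVM's CSR emulation table: SIREG accesses are routed
 * through the iselect-indirected handler and STOPEI through the
 * topei handler.
 */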
#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
{ .base = CSR_SIREG,      .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
{ .base = CSR_STOPEI,     .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
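/*
 * A sketch of how the entries above are consumed, assuming a
 * csr_funcs[]-style dispatch array on the CSR emulation path
 * (names illustrative):
 *
 *	static const struct csr_func csr_funcs[] = {
 *		KVM_RISCV_VCPU_AIA_CSR_FUNCS
 *	};
 */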

int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);

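/*
 * MSI injection: hart_index and guest_index select the target IMSIC
 * (and guest interrupt file), iid is the interrupt identity raised
 * there; kvm_riscv_aia_inject_irq drives a wired APLIC source instead.
 */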
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid);
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level);

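/* VM-wide AIA setup and teardown */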
void kvm_riscv_aia_init_vm(struct kvm *kvm);
void kvm_riscv_aia_destroy_vm(struct kvm *kvm);

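/*
 * Host guest-external-interrupt (HGEI) management: allocates a guest
 * interrupt file of the host IMSIC to a VCPU and reports its mapping
 * via hgei_va/hgei_pa.
 */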
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa);
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);

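/* Per-CPU enable/disable and module-wide AIA initialization */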
void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
int kvm_riscv_aia_init(void);
void kvm_riscv_aia_exit(void);

#endif