xref: /openbmc/linux/arch/riscv/kvm/aia.c (revision 726ccdba)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <asm/hwcap.h>

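/* Static key: set in kvm_riscv_aia_init() when the host ISA has AIA support */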
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);

static void aia_set_hvictl(bool ext_irq_pending)
{
	unsigned long hvictl;

	/*
	 * HVICTL.IID == 9 and HVICTL.IPRIO == 0 represent
	 * "no interrupt" in HVICTL.
	 */

	hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
	hvictl |= ext_irq_pending;
	csr_write(CSR_HVICTL, hvictl);
}

#ifdef CONFIG_32BIT
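/*
 * On RV32, IRQs 32-63 are tracked in the second word of irqs_pending
 * and irqs_pending_mask. Fold any pending updates into the shadow
 * HVIPH value so they reach hardware on the next HVIPH write.
 */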
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long mask, val;

	if (!kvm_riscv_aia_available())
		return;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

		csr->hviph &= ~mask;
		csr->hviph |= val;
	}
}

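/* Snapshot VSIEH so software sees the guest's enables for IRQs 32-63 */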
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (kvm_riscv_aia_available())
		csr->vsieh = csr_read(CSR_VSIEH);
}
#endif

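/*
 * Check whether any of the interrupts in @mask are enabled and pending
 * for this VCPU. On RV32, enables for IRQs 32-63 live in VSIEH and are
 * checked against the upper word of the pending bits.
 */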
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long seip;

	if (!kvm_riscv_aia_available())
		return false;

#ifdef CONFIG_32BIT
	if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
	    (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
		return true;
#endif

	seip = vcpu->arch.guest_csr.vsie;
	seip &= (unsigned long)mask;
	seip &= BIT(IRQ_S_EXT);

	if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
		return false;

	/*
	 * The in-kernel IMSIC pending check is not wired up at this
	 * revision, so conservatively report no interrupt. Later kernels
	 * return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu) here.
	 */
	return false;
}

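/*
 * Push pending-interrupt state to hardware: write the shadow HVIPH on
 * RV32, then program HVICTL based on whether a VS-level external
 * interrupt is pending in HVIP.
 */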
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
	aia_set_hvictl(!!(csr->hvip & BIT(IRQ_VS_EXT)));
}

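/* Restore this VCPU's AIA CSR state to hardware when it is scheduled in */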
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

	csr_write(CSR_VSISELECT, csr->vsiselect);
	csr_write(CSR_HVIPRIO1, csr->hviprio1);
	csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
	csr_write(CSR_VSIEH, csr->vsieh);
	csr_write(CSR_HVIPH, csr->hviph);
	csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
	csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
}

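/* Save the hardware AIA CSR state into this VCPU's context on schedule-out */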
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

	csr->vsiselect = csr_read(CSR_VSISELECT);
	csr->hviprio1 = csr_read(CSR_HVIPRIO1);
	csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
	csr->vsieh = csr_read(CSR_VSIEH);
	csr->hviph = csr_read(CSR_HVIPH);
	csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
	csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
}

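/*
 * Read one guest AIA CSR, addressed as a word index into
 * struct kvm_riscv_aia_csr (the KVM_REG_RISCV_CSR_AIA register space).
 * Reads as zero when AIA is unavailable.
 */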
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -EINVAL;

	*out_val = 0;
	if (kvm_riscv_aia_available())
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

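/*
 * Write one guest AIA CSR by word index. On RV32, a direct write of
 * siph supersedes any queued software updates, so the corresponding
 * irqs_pending_mask word is cleared.
 */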
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (kvm_riscv_aia_available()) {
		((unsigned long *)csr)[reg_num] = val;

#ifdef CONFIG_32BIT
		if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
			WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
	}

	return 0;
}

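/*
 * Emulate guest read-modify-write of the TOPEI register: redirect as an
 * illegal-instruction trap when AIA is unavailable, exit to user space
 * when the in-kernel AIA device is not initialized, otherwise forward
 * to the IMSIC emulation.
 */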
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask)
{
	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* If AIA not initialized then forward to user space */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return KVM_INSN_EXIT_TO_USER_SPACE;

	return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
					    val, new_val, wr_mask);
}

/*
 * External IRQ priority is always read-only zero. This means the
 * default priority order is always preferred for external IRQs unless
 * HVICTL.IID == 9 and HVICTL.IPRIO != 0.
 */
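/*
 * Map a local IRQ number to the bit position of its 8-bit priority
 * field across the HVIPRIO CSRs; -1 marks IRQs without a programmable
 * priority.
 */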
static int aia_irq2bitpos[] = {
0,     8,   -1,   -1,   16,   24,   -1,   -1, /* 0 - 7 */
32,   -1,   -1,   -1,   -1,   40,   48,   56, /* 8 - 15 */
64,   72,   80,   88,   96,  104,  112,  120, /* 16 - 23 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24 - 31 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 32 - 39 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 40 - 47 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48 - 55 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 56 - 63 */
};

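/* Read the 8-bit priority of @irq from the HVIPRIO CSR that holds it */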
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return 0;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = csr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = csr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = csr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return 0;
	}

	return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}

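/* Write the 8-bit priority of @irq into the HVIPRIO CSR that holds it */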
static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = csr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = csr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = csr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return;
	}

	hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
	hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		csr_write(CSR_HVIPRIO1, hviprio);
		break;
	case 1:
#ifndef CONFIG_32BIT
		csr_write(CSR_HVIPRIO2, hviprio);
		break;
#else
		csr_write(CSR_HVIPRIO1H, hviprio);
		break;
	case 2:
		csr_write(CSR_HVIPRIO2, hviprio);
		break;
	case 3:
		csr_write(CSR_HVIPRIO2H, hviprio);
		break;
#endif
	default:
		return;
	}
}

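/*
 * Emulate read-modify-write of one iprio group. Each group packs one
 * byte per IRQ, four IRQs per 32 bits, so a register covers 4 IRQs on
 * RV32 and 8 on RV64; odd group indices are invalid on RV64.
 */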
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
			 unsigned long *val, unsigned long new_val,
			 unsigned long wr_mask)
{
	int i, first_irq, nirqs;
	unsigned long old_val;
	u8 prio;

#ifndef CONFIG_32BIT
	if (isel & 0x1)
		return KVM_INSN_ILLEGAL_TRAP;
#endif

	nirqs = 4 * (BITS_PER_LONG / 32);
	first_irq = (isel - ISELECT_IPRIO0) * 4;

	old_val = 0;
	for (i = 0; i < nirqs; i++) {
		prio = aia_get_iprio8(vcpu, first_irq + i);
		old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
	}

	if (val)
		*val = old_val;

	if (wr_mask) {
		new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
		for (i = 0; i < nirqs; i++) {
			prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
				TOPI_IPRIO_MASK;
			aia_set_iprio8(vcpu, first_irq + i, prio);
		}
	}

	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

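/*
 * Indirectly-selected registers handled here: ISELECT_IPRIO0-15 are
 * emulated locally, and 0x70-0xff is the IMSIC range forwarded to the
 * IMSIC emulation once the in-kernel AIA device is initialized.
 */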
#define IMSIC_FIRST	0x70
#define IMSIC_LAST	0xff
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask)
{
	unsigned int isel;

	/* If AIA not available then redirect trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* First try to emulate in kernel space */
	isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
		 kvm_riscv_aia_initialized(vcpu->kvm))
		return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
						    wr_mask);

	/* We can't handle it here so redirect to user space */
	return KVM_INSN_EXIT_TO_USER_SPACE;
}

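/*
 * Per-CPU enable: program HVICTL for "no interrupt" and zero the
 * priority CSRs (plus HVIPH/HIDELEGH and the high halves on RV32).
 */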
void kvm_riscv_aia_enable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	aia_set_hvictl(false);
	csr_write(CSR_HVIPRIO1, 0x0);
	csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, 0x0);
	csr_write(CSR_HIDELEGH, 0x0);
	csr_write(CSR_HVIPRIO1H, 0x0);
	csr_write(CSR_HVIPRIO2H, 0x0);
#endif
}

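/* Per-CPU disable: park HVICTL back at its "no interrupt" value */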
void kvm_riscv_aia_disable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	aia_set_hvictl(false);
}

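/* One-time init: detect the SxAIA ISA extension and flip the static key */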
int kvm_riscv_aia_init(void)
{
	if (!riscv_isa_extension_available(NULL, SxAIA))
		return -ENODEV;

	/* Enable KVM AIA support */
	static_branch_enable(&kvm_riscv_aia_available);

	return 0;
}

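/* Nothing to clean up at this revision; placeholder for future teardown */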
void kvm_riscv_aia_exit(void)
{
}