// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

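/*
 * Bits 0..25 of the ISA config register correspond to the single-letter
 * extensions 'a' through 'z'.
 */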
#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
};

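/* Convert a host ISA extension ID into the matching KVM ISA extension ID. */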
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

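/*
 * The H extension can never be enabled through this interface; V is
 * allowed only when user-space control of vector state permits it.
 */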
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

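/* Extensions listed here cannot be disabled for a guest. */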
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
		return false;
	default:
		break;
	}

	return true;
}

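/*
 * Populate the guest ISA with every extension that is both available
 * on the host and allowed to be enabled for a guest.
 */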
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

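/*
 * A sketch of how user space might read one of the config registers
 * below (illustrative only; assumes a 64-bit host, where
 * KVM_REG_SIZE(reg->id) must equal sizeof(unsigned long)):
 *
 *	unsigned long isa;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */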
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

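/*
 * The core register block mirrors struct kvm_riscv_core: regs.pc is
 * backed by the guest sepc, while x1..x31 are accessed directly at the
 * same word index in the guest context.
 */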
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
			  KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

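/*
 * The sip CSR needs special handling: reads flush pending interrupts
 * into hvip and convert the HVIP bit layout to the VS-level one, while
 * writes (in the setter below) also clear the pending-interrupt mask so
 * the written value is not immediately overridden by stale updates.
 */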
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

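/*
 * Each ISA MULTI register packs BITS_PER_LONG extensions, one per bit,
 * covering extension IDs from reg_num * BITS_PER_LONG onwards.
 */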
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

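/*
 * The copy_*_reg_indices() helpers below do double duty: when called
 * with a NULL uindices pointer they only count the registers that
 * would be copied.
 */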
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
	     i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting a config reg if the corresponding
		 * extension is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
			  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

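/*
 * fp.d exposes the D-extension registers as u64 values plus fcsr as a
 * u32; the fcsr slot is the "+ 1" counted by num_fp_d_regs().
 */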
static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

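/* Only ISA extensions actually present on the host are enumerated. */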
static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static inline unsigned long num_sbi_ext_regs(void)
{
	/*
	 * number of KVM_REG_RISCV_SBI_SINGLE +
	 * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
	 */
	return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
}

static int copy_sbi_ext_reg_indices(u64 __user *uindices)
{
	int n;

	/* copy KVM_REG_RISCV_SBI_SINGLE */
	n = KVM_RISCV_SBI_EXT_MAX;
	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy KVM_REG_RISCV_SBI_MULTI */
	n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_MULTI_EN | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
		      KVM_REG_RISCV_SBI_MULTI_DIS | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return num_sbi_ext_regs();
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs();

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(uindices);
	if (ret < 0)
		return ret;

	return 0;
}

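/*
 * A sketch of how user space might enumerate all of these indices with
 * KVM_GET_REG_LIST (illustrative only; the usual two-call pattern where
 * the first call undershoots, fails with E2BIG, and reports the needed
 * count back in n):
 *
 *	struct kvm_reg_list hdr = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &hdr);
 *	list = calloc(1, sizeof(*list) + hdr.n * sizeof(__u64));
 *	list->n = hdr.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 */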
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}