xref: /openbmc/linux/arch/arm64/kvm/guest.c (revision ba2929159000dc7015cc01cdf7bb72542e19952a)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
22f4a07c5SMarc Zyngier /*
32f4a07c5SMarc Zyngier  * Copyright (C) 2012,2013 - ARM Ltd
42f4a07c5SMarc Zyngier  * Author: Marc Zyngier <marc.zyngier@arm.com>
52f4a07c5SMarc Zyngier  *
62f4a07c5SMarc Zyngier  * Derived from arch/arm/kvm/guest.c:
72f4a07c5SMarc Zyngier  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
82f4a07c5SMarc Zyngier  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
92f4a07c5SMarc Zyngier  */
102f4a07c5SMarc Zyngier 
11e1c9c983SDave Martin #include <linux/bits.h>
122f4a07c5SMarc Zyngier #include <linux/errno.h>
132f4a07c5SMarc Zyngier #include <linux/err.h>
14e1c9c983SDave Martin #include <linux/nospec.h>
152f4a07c5SMarc Zyngier #include <linux/kvm_host.h>
162f4a07c5SMarc Zyngier #include <linux/module.h>
17be25bbb3SDave Martin #include <linux/stddef.h>
18dc52f31aSDave Martin #include <linux/string.h>
192f4a07c5SMarc Zyngier #include <linux/vmalloc.h>
202f4a07c5SMarc Zyngier #include <linux/fs.h>
2185fbe08eSRaghavendra Rao Ananta #include <kvm/arm_hypercalls.h>
222f4a07c5SMarc Zyngier #include <asm/cputype.h>
237c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
24e1c9c983SDave Martin #include <asm/fpsimd.h>
252f4a07c5SMarc Zyngier #include <asm/kvm.h>
262f4a07c5SMarc Zyngier #include <asm/kvm_emulate.h>
271d05d51bSChristoffer Dall #include <asm/kvm_nested.h>
28e1c9c983SDave Martin #include <asm/sigcontext.h>
292f4a07c5SMarc Zyngier 
30eef8c85aSAlex Bennée #include "trace.h"
31eef8c85aSAlex Bennée 
/* Descriptors for the VM-scope stats exported through the KVM stats ABI */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};

/* Header describing the layout (name/desc/data offsets) of the VM stats */
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset =  sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
44fcfe1baeSJing Zhang 
/*
 * Descriptors for the per-vcpu stats: the generic set plus the arm64
 * exit-accounting counters updated by the KVM exit handlers.
 */
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

/* Header describing the layout of the per-vcpu stats */
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
64ce55c049SJing Zhang 
core_reg_offset_is_vreg(u64 off)658c86dfe3SDave Martin static bool core_reg_offset_is_vreg(u64 off)
668c86dfe3SDave Martin {
678c86dfe3SDave Martin 	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
688c86dfe3SDave Martin 		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
698c86dfe3SDave Martin }
708c86dfe3SDave Martin 
/*
 * Strip the architecture, size and core-reg marker fields from a reg ID,
 * leaving only the index into struct kvm_regs (in 32-bit word units).
 */
static u64 core_reg_offset_from_id(u64 id)
{
	const u64 non_offset_bits = KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK |
				    KVM_REG_ARM_CORE;

	return id & ~non_offset_bits;
}
752f4a07c5SMarc Zyngier 
/*
 * Return the size in bytes of the core register at index @off (an index
 * into struct kvm_regs in 32-bit word units), or -EINVAL if @off does not
 * name a valid, accessible register for this vcpu.
 */
static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	/* Reject offsets not naturally aligned for the register's size */
	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}
120df205b5cSDave Martin 
/*
 * Translate a userspace core register ID into the address of its backing
 * storage in the vcpu context.  Returns NULL if the ID does not name a
 * valid, correctly-sized register accessible on this vcpu.
 */
static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return NULL;

	/* The size encoded in the reg ID must match the register's size */
	if (KVM_REG_SIZE(reg->id) != size)
		return NULL;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
		off /= 2;	/* 64-bit GPRs span two 32-bit index slots */
		return &vcpu->arch.ctxt.regs.regs[off];

	case KVM_REG_ARM_CORE_REG(regs.sp):
		return &vcpu->arch.ctxt.regs.sp;

	case KVM_REG_ARM_CORE_REG(regs.pc):
		return &vcpu->arch.ctxt.regs.pc;

	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return &vcpu->arch.ctxt.regs.pstate;

	case KVM_REG_ARM_CORE_REG(sp_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

	case KVM_REG_ARM_CORE_REG(elr_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
		return &vcpu->arch.ctxt.spsr_abt;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
		return &vcpu->arch.ctxt.spsr_und;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
		return &vcpu->arch.ctxt.spsr_irq;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
		return &vcpu->arch.ctxt.spsr_fiq;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
		off /= 4;	/* 128-bit V-regs span four 32-bit index slots */
		return &vcpu->arch.ctxt.fp_regs.vregs[off];

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return &vcpu->arch.ctxt.fp_regs.fpsr;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return &vcpu->arch.ctxt.fp_regs.fpcr;

	default:
		return NULL;
	}
}
185d26c25a9SDave Martin 
/* Copy the value of a single core register out to userspace. */
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	void *addr;
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	/* Map the ID onto its backing storage in the vcpu context */
	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
2142f4a07c5SMarc Zyngier 
/*
 * Write a single core register from a userspace-supplied value,
 * validating PSTATE mode changes against the vcpu's configuration and
 * narrowing the GPRs when the vcpu ends up in an AArch32 mode.
 */
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	/* The largest core register is 128 bits wide (a V-register) */
	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	/*
	 * A PSTATE write may only select an execution mode consistent with
	 * the vcpu's configuration (32-bit EL0/EL1 support, nested virt
	 * for the EL2 modes):
	 */
	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!kvm_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
		case PSR_AA32_MODE_SYS:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL2h:
		case PSR_MODE_EL2t:
			if (!vcpu_has_nv(vcpu))
				return -EINVAL;
			fallthrough;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(addr, valp, KVM_REG_SIZE(reg->id));

	/* If now in an AArch32 mode, truncate the visible GPRs to 32 bits */
	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i, nr_reg;

		switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
		/*
		 * Either we are dealing with user mode, and only the
		 * first 15 registers (+ PC) must be narrowed to 32bit.
		 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
		 */
		case PSR_AA32_MODE_USR:
		case PSR_AA32_MODE_SYS:
			nr_reg = 15;
			break;

		/*
		 * Otherwise, this is a privileged mode, and *all* the
		 * registers must be narrowed to 32bit.
		 */
		default:
			nr_reg = 31;
			break;
		}

		for (i = 0; i < nr_reg; i++)
			vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));

		*vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
	}
out:
	return err;
}
3082f4a07c5SMarc Zyngier 
/*
 * Helpers for the KVM_REG_ARM64_SVE_VLS bitmap: one bit per SVE vector
 * quadword-length (VQ), packed into an array of u64 words.
 */
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
3129033bba4SDave Martin 
/*
 * KVM_GET_ONE_REG handler for the KVM_REG_ARM64_SVE_VLS pseudo-register:
 * report the vcpu's available SVE vector lengths as a bitmap of VQs.
 */
static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	/* Set one bit for each VQ available up to the vcpu's maximum */
	max_vq = vcpu_sve_max_vq(vcpu);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}
3369033bba4SDave Martin 
/*
 * KVM_SET_ONE_REG handler for the KVM_REG_ARM64_SVE_VLS pseudo-register:
 * configure the vcpu's set of SVE vector lengths.  Only permitted before
 * the vcpu's SVE configuration is finalized.
 */
static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	/* Find the largest vector length requested by userspace */
	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN.  So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
3829033bba4SDave Martin 
/* Layout of the slice and register-number fields within an SVE reg ID: */
#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

/* User-visible sizes of a single Z- or P-register slice: */
#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1
4068e3c54c8SDave Martin 
/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory
				 * (zero-filled on reads by get_sve_reg()) */
};
413e1c9c983SDave Martin 
/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy.  On success, @region describes the kernel-side byte
 * range within sve_state and the trailing user-only padding.
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		/* Only slice 0 exists for now (see vcpu_sve_slices()) */
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	/* Clamp to kernel-side storage; any excess user bytes are padding */
	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}
484e1c9c983SDave Martin 
/* KVM_GET_ONE_REG handler for SVE registers and the VLS pseudo-register */
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	/* Copy the register out, zero-filling the user-only padding */
	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}
510e1c9c983SDave Martin 
/* KVM_SET_ONE_REG handler for SVE registers and the VLS pseudo-register */
static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	/* Only the kernel-backed bytes are written; upad is ignored */
	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}
535e1c9c983SDave Martin 
/*
 * The bulk KVM_GET_REGS ioctl is not implemented on arm64 (always
 * -EINVAL); registers are accessed individually via the ONE_REG API.
 */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
5402f4a07c5SMarc Zyngier 
/*
 * The bulk KVM_SET_REGS ioctl is not implemented on arm64 (always
 * -EINVAL); registers are accessed individually via the ONE_REG API.
 */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
5452f4a07c5SMarc Zyngier 
/*
 * Enumerate the core register IDs accessible on @vcpu.  If @uindices is
 * non-NULL, each ID is also copied out to userspace.  Returns the number
 * of registers enumerated, or -EFAULT on a failed copy.
 */
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		/* Skip offsets that don't name an accessible register */
		if (size < 0)
			continue;

		/* Encode the register's size into the reg ID */
		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}
588be25bbb3SDave Martin 
/* Count the core registers visible on @vcpu (nothing copied to user) */
static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}
5932f4a07c5SMarc Zyngier 
/* Arch timer registers exposed through the ONE_REG/GET_REG_LIST API */
static const u64 timer_reg_list[] = {
	KVM_REG_ARM_TIMER_CTL,
	KVM_REG_ARM_TIMER_CNT,
	KVM_REG_ARM_TIMER_CVAL,
	KVM_REG_ARM_PTIMER_CTL,
	KVM_REG_ARM_PTIMER_CNT,
	KVM_REG_ARM_PTIMER_CVAL,
};

#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
6041df08ba0SAlex Bennée 
is_timer_reg(u64 index)6051df08ba0SAlex Bennée static bool is_timer_reg(u64 index)
6061df08ba0SAlex Bennée {
6071df08ba0SAlex Bennée 	switch (index) {
6081df08ba0SAlex Bennée 	case KVM_REG_ARM_TIMER_CTL:
6091df08ba0SAlex Bennée 	case KVM_REG_ARM_TIMER_CNT:
6101df08ba0SAlex Bennée 	case KVM_REG_ARM_TIMER_CVAL:
611680232a9SMarc Zyngier 	case KVM_REG_ARM_PTIMER_CTL:
612680232a9SMarc Zyngier 	case KVM_REG_ARM_PTIMER_CNT:
613680232a9SMarc Zyngier 	case KVM_REG_ARM_PTIMER_CVAL:
6141df08ba0SAlex Bennée 		return true;
6151df08ba0SAlex Bennée 	}
6161df08ba0SAlex Bennée 	return false;
6171df08ba0SAlex Bennée }
6181df08ba0SAlex Bennée 
/* Copy all timer register IDs out to userspace; -EFAULT on failure. */
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;

	for (i = 0; i < NUM_TIMER_REGS; i++, uindices++) {
		if (put_user(timer_reg_list[i], uindices))
			return -EFAULT;
	}

	return 0;
}
6291df08ba0SAlex Bennée 
set_timer_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)6301df08ba0SAlex Bennée static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
6311df08ba0SAlex Bennée {
6321df08ba0SAlex Bennée 	void __user *uaddr = (void __user *)(long)reg->addr;
6331df08ba0SAlex Bennée 	u64 val;
6341df08ba0SAlex Bennée 	int ret;
6351df08ba0SAlex Bennée 
6361df08ba0SAlex Bennée 	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
6371df08ba0SAlex Bennée 	if (ret != 0)
638bd218bceSWill Deacon 		return -EFAULT;
6391df08ba0SAlex Bennée 
6401df08ba0SAlex Bennée 	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
6411df08ba0SAlex Bennée }
6421df08ba0SAlex Bennée 
get_timer_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)6431df08ba0SAlex Bennée static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
6441df08ba0SAlex Bennée {
6451df08ba0SAlex Bennée 	void __user *uaddr = (void __user *)(long)reg->addr;
6461df08ba0SAlex Bennée 	u64 val;
6471df08ba0SAlex Bennée 
6481df08ba0SAlex Bennée 	val = kvm_arm_timer_get_reg(vcpu, reg->id);
6494cad67fcSMichael S. Tsirkin 	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
6501df08ba0SAlex Bennée }
6511df08ba0SAlex Bennée 
num_sve_regs(const struct kvm_vcpu * vcpu)6528e3c54c8SDave Martin static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
6538e3c54c8SDave Martin {
6548e3c54c8SDave Martin 	const unsigned int slices = vcpu_sve_slices(vcpu);
6558e3c54c8SDave Martin 
6568e3c54c8SDave Martin 	if (!vcpu_has_sve(vcpu))
6578e3c54c8SDave Martin 		return 0;
6588e3c54c8SDave Martin 
6599033bba4SDave Martin 	/* Policed by KVM_GET_REG_LIST: */
6609033bba4SDave Martin 	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
6619033bba4SDave Martin 
6629033bba4SDave Martin 	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
6639033bba4SDave Martin 		+ 1; /* KVM_REG_ARM64_SVE_VLS */
6648e3c54c8SDave Martin }
6658e3c54c8SDave Martin 
/*
 * Report the SVE register indices to userspace.  The enumeration order
 * (VLS, then per slice: ZREGs, PREGs, FFR) is what KVM_GET_REG_LIST
 * reports and must not change.
 */
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	unsigned int slice, idx;
	u64 reg = KVM_REG_ARM64_SVE_VLS;
	int count = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate the vector-lengths pseudo-register first, so that
	 * userspace can save/restore in the order reported by
	 * KVM_GET_REG_LIST.
	 */
	if (put_user(reg, uindices++))
		return -EFAULT;
	count++;

	for (slice = 0; slice < slices; slice++) {
		for (idx = 0; idx < SVE_NUM_ZREGS; idx++) {
			reg = KVM_REG_ARM64_SVE_ZREG(idx, slice);
			if (put_user(reg, uindices++))
				return -EFAULT;
			count++;
		}

		for (idx = 0; idx < SVE_NUM_PREGS; idx++) {
			reg = KVM_REG_ARM64_SVE_PREG(idx, slice);
			if (put_user(reg, uindices++))
				return -EFAULT;
			count++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(slice);
		if (put_user(reg, uindices++))
			return -EFAULT;
		count++;
	}

	return count;
}
7128e3c54c8SDave Martin 
7131df08ba0SAlex Bennée /**
7142f4a07c5SMarc Zyngier  * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
7152f4a07c5SMarc Zyngier  *
7162f4a07c5SMarc Zyngier  * This is for all registers.
7172f4a07c5SMarc Zyngier  */
kvm_arm_num_regs(struct kvm_vcpu * vcpu)7182f4a07c5SMarc Zyngier unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
7192f4a07c5SMarc Zyngier {
7207aa92cf3SDave Martin 	unsigned long res = 0;
7217aa92cf3SDave Martin 
7228c86dfe3SDave Martin 	res += num_core_regs(vcpu);
7238e3c54c8SDave Martin 	res += num_sve_regs(vcpu);
7247aa92cf3SDave Martin 	res += kvm_arm_num_sys_reg_descs(vcpu);
7257aa92cf3SDave Martin 	res += kvm_arm_get_fw_num_regs(vcpu);
7267aa92cf3SDave Martin 	res += NUM_TIMER_REGS;
7277aa92cf3SDave Martin 
7287aa92cf3SDave Martin 	return res;
7292f4a07c5SMarc Zyngier }
7302f4a07c5SMarc Zyngier 
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 * @vcpu:     the vCPU whose register list is requested
 * @uindices: userspace buffer, sized per kvm_arm_num_regs()
 *
 * We do core registers right here, then we append system regs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	/* Each helper returns the number of entries it wrote... */
	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	/*
	 * NOTE(review): ...except here, where the buffer is advanced by the
	 * firmware register count rather than by ret — presumably
	 * kvm_arm_copy_fw_reg_indices() returns 0 on success; confirm
	 * against its definition.
	 */
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	/* System registers come last. */
	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
7622f4a07c5SMarc Zyngier 
kvm_arm_get_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)7632f4a07c5SMarc Zyngier int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
7642f4a07c5SMarc Zyngier {
7652f4a07c5SMarc Zyngier 	/* We currently use nothing arch-specific in upper 32 bits */
7662f4a07c5SMarc Zyngier 	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
7672f4a07c5SMarc Zyngier 		return -EINVAL;
7682f4a07c5SMarc Zyngier 
769e1c9c983SDave Martin 	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
770e1c9c983SDave Martin 	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
77105714cabSRaghavendra Rao Ananta 	case KVM_REG_ARM_FW:
77205714cabSRaghavendra Rao Ananta 	case KVM_REG_ARM_FW_FEAT_BMAP:
77305714cabSRaghavendra Rao Ananta 		return kvm_arm_get_fw_reg(vcpu, reg);
774e1c9c983SDave Martin 	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
775e1c9c983SDave Martin 	}
77685bd0ba1SMarc Zyngier 
7771df08ba0SAlex Bennée 	if (is_timer_reg(reg->id))
7781df08ba0SAlex Bennée 		return get_timer_reg(vcpu, reg);
7791df08ba0SAlex Bennée 
7802f4a07c5SMarc Zyngier 	return kvm_arm_sys_reg_get_reg(vcpu, reg);
7812f4a07c5SMarc Zyngier }
7822f4a07c5SMarc Zyngier 
kvm_arm_set_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)7832f4a07c5SMarc Zyngier int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
7842f4a07c5SMarc Zyngier {
7852f4a07c5SMarc Zyngier 	/* We currently use nothing arch-specific in upper 32 bits */
7862f4a07c5SMarc Zyngier 	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
7872f4a07c5SMarc Zyngier 		return -EINVAL;
7882f4a07c5SMarc Zyngier 
789e1c9c983SDave Martin 	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
790e1c9c983SDave Martin 	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
79105714cabSRaghavendra Rao Ananta 	case KVM_REG_ARM_FW:
79205714cabSRaghavendra Rao Ananta 	case KVM_REG_ARM_FW_FEAT_BMAP:
79305714cabSRaghavendra Rao Ananta 		return kvm_arm_set_fw_reg(vcpu, reg);
794e1c9c983SDave Martin 	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
795e1c9c983SDave Martin 	}
79685bd0ba1SMarc Zyngier 
7971df08ba0SAlex Bennée 	if (is_timer_reg(reg->id))
7981df08ba0SAlex Bennée 		return set_timer_reg(vcpu, reg);
7991df08ba0SAlex Bennée 
8002f4a07c5SMarc Zyngier 	return kvm_arm_sys_reg_set_reg(vcpu, reg);
8012f4a07c5SMarc Zyngier }
8022f4a07c5SMarc Zyngier 
/* KVM_GET_SREGS is not supported on arm64; always fails with -EINVAL. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
8082f4a07c5SMarc Zyngier 
/* KVM_SET_SREGS is not supported on arm64; always fails with -EINVAL. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
8142f4a07c5SMarc Zyngier 
/*
 * Fill in @events with the vCPU's pending-exception state for
 * KVM_GET_VCPU_EVENTS.  Always returns 0.
 */
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	/* A virtual SError is pending iff HCR_EL2.VSE is set for this vCPU. */
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	/* An ESR can only accompany the SError when the host has RAS. */
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}
832b7b27facSDongjiu Geng 
__kvm_arm_vcpu_set_events(struct kvm_vcpu * vcpu,struct kvm_vcpu_events * events)833539aee0eSJames Morse int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
834b7b27facSDongjiu Geng 			      struct kvm_vcpu_events *events)
835b7b27facSDongjiu Geng {
836b7b27facSDongjiu Geng 	bool serror_pending = events->exception.serror_pending;
837b7b27facSDongjiu Geng 	bool has_esr = events->exception.serror_has_esr;
838da345174SChristoffer Dall 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
839b7b27facSDongjiu Geng 
840b7b27facSDongjiu Geng 	if (serror_pending && has_esr) {
841b7b27facSDongjiu Geng 		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
842b7b27facSDongjiu Geng 			return -EINVAL;
843b7b27facSDongjiu Geng 
844b7b27facSDongjiu Geng 		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
845b7b27facSDongjiu Geng 			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
846b7b27facSDongjiu Geng 		else
847b7b27facSDongjiu Geng 			return -EINVAL;
848b7b27facSDongjiu Geng 	} else if (serror_pending) {
849b7b27facSDongjiu Geng 		kvm_inject_vabt(vcpu);
850b7b27facSDongjiu Geng 	}
851b7b27facSDongjiu Geng 
852da345174SChristoffer Dall 	if (ext_dabt_pending)
853da345174SChristoffer Dall 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
854da345174SChristoffer Dall 
855b7b27facSDongjiu Geng 	return 0;
856b7b27facSDongjiu Geng }
857b7b27facSDongjiu Geng 
kvm_target_cpu(void)8586b7982feSAnshuman Khandual u32 __attribute_const__ kvm_target_cpu(void)
8592f4a07c5SMarc Zyngier {
8602f4a07c5SMarc Zyngier 	unsigned long implementor = read_cpuid_implementor();
8612f4a07c5SMarc Zyngier 	unsigned long part_number = read_cpuid_part_number();
8622f4a07c5SMarc Zyngier 
863e28100bdSAnup Patel 	switch (implementor) {
864e28100bdSAnup Patel 	case ARM_CPU_IMP_ARM:
8652f4a07c5SMarc Zyngier 		switch (part_number) {
8662f4a07c5SMarc Zyngier 		case ARM_CPU_PART_AEM_V8:
8672f4a07c5SMarc Zyngier 			return KVM_ARM_TARGET_AEM_V8;
8682f4a07c5SMarc Zyngier 		case ARM_CPU_PART_FOUNDATION:
8692f4a07c5SMarc Zyngier 			return KVM_ARM_TARGET_FOUNDATION_V8;
8701252b331SMarc Zyngier 		case ARM_CPU_PART_CORTEX_A53:
8711252b331SMarc Zyngier 			return KVM_ARM_TARGET_CORTEX_A53;
8722f4a07c5SMarc Zyngier 		case ARM_CPU_PART_CORTEX_A57:
8732f4a07c5SMarc Zyngier 			return KVM_ARM_TARGET_CORTEX_A57;
874f0725345Szhong jiang 		}
875e28100bdSAnup Patel 		break;
876e28100bdSAnup Patel 	case ARM_CPU_IMP_APM:
877e28100bdSAnup Patel 		switch (part_number) {
8782a8eb560SAndre Przywara 		case APM_CPU_PART_XGENE:
879e28100bdSAnup Patel 			return KVM_ARM_TARGET_XGENE_POTENZA;
880f0725345Szhong jiang 		}
881e28100bdSAnup Patel 		break;
882f0725345Szhong jiang 	}
883e28100bdSAnup Patel 
884bca556acSSuzuki K. Poulose 	/* Return a default generic target */
885bca556acSSuzuki K. Poulose 	return KVM_ARM_TARGET_GENERIC_V8;
8862f4a07c5SMarc Zyngier }
8872f4a07c5SMarc Zyngier 
/* KVM_GET_FPU is not supported on arm64; always fails with -EINVAL. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}
8922f4a07c5SMarc Zyngier 
/* KVM_SET_FPU is not supported on arm64; always fails with -EINVAL. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}
8972f4a07c5SMarc Zyngier 
/* KVM_TRANSLATE is not supported on arm64; always fails with -EINVAL. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
9030e6f07f2SAlex Bennée 
/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu:	the vCPU being debugged
 * @dbg:	the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 *
 * Return: 0 on success, -EINVAL if @dbg carries unknown control bits.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	/* Reject any control bits we do not know about. */
	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
	}

out:
	return ret;
}
943bb0c70bcSShannon Zhao 
kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu * vcpu,struct kvm_device_attr * attr)944bb0c70bcSShannon Zhao int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
945bb0c70bcSShannon Zhao 			       struct kvm_device_attr *attr)
946bb0c70bcSShannon Zhao {
947bb0c70bcSShannon Zhao 	int ret;
948bb0c70bcSShannon Zhao 
949bb0c70bcSShannon Zhao 	switch (attr->group) {
950bb0c70bcSShannon Zhao 	case KVM_ARM_VCPU_PMU_V3_CTRL:
9514bba7f7dSOliver Upton 		mutex_lock(&vcpu->kvm->arch.config_lock);
952bb0c70bcSShannon Zhao 		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
9534bba7f7dSOliver Upton 		mutex_unlock(&vcpu->kvm->arch.config_lock);
954bb0c70bcSShannon Zhao 		break;
95599a1db7aSChristoffer Dall 	case KVM_ARM_VCPU_TIMER_CTRL:
95699a1db7aSChristoffer Dall 		ret = kvm_arm_timer_set_attr(vcpu, attr);
95799a1db7aSChristoffer Dall 		break;
95858772e9aSSteven Price 	case KVM_ARM_VCPU_PVTIME_CTRL:
95958772e9aSSteven Price 		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
96058772e9aSSteven Price 		break;
961bb0c70bcSShannon Zhao 	default:
962bb0c70bcSShannon Zhao 		ret = -ENXIO;
963bb0c70bcSShannon Zhao 		break;
964bb0c70bcSShannon Zhao 	}
965bb0c70bcSShannon Zhao 
966bb0c70bcSShannon Zhao 	return ret;
967bb0c70bcSShannon Zhao }
968bb0c70bcSShannon Zhao 
kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu * vcpu,struct kvm_device_attr * attr)969bb0c70bcSShannon Zhao int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
970bb0c70bcSShannon Zhao 			       struct kvm_device_attr *attr)
971bb0c70bcSShannon Zhao {
972bb0c70bcSShannon Zhao 	int ret;
973bb0c70bcSShannon Zhao 
974bb0c70bcSShannon Zhao 	switch (attr->group) {
975bb0c70bcSShannon Zhao 	case KVM_ARM_VCPU_PMU_V3_CTRL:
976bb0c70bcSShannon Zhao 		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
977bb0c70bcSShannon Zhao 		break;
97899a1db7aSChristoffer Dall 	case KVM_ARM_VCPU_TIMER_CTRL:
97999a1db7aSChristoffer Dall 		ret = kvm_arm_timer_get_attr(vcpu, attr);
98099a1db7aSChristoffer Dall 		break;
98158772e9aSSteven Price 	case KVM_ARM_VCPU_PVTIME_CTRL:
98258772e9aSSteven Price 		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
98358772e9aSSteven Price 		break;
984bb0c70bcSShannon Zhao 	default:
985bb0c70bcSShannon Zhao 		ret = -ENXIO;
986bb0c70bcSShannon Zhao 		break;
987bb0c70bcSShannon Zhao 	}
988bb0c70bcSShannon Zhao 
989bb0c70bcSShannon Zhao 	return ret;
990bb0c70bcSShannon Zhao }
991bb0c70bcSShannon Zhao 
kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu * vcpu,struct kvm_device_attr * attr)992bb0c70bcSShannon Zhao int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
993bb0c70bcSShannon Zhao 			       struct kvm_device_attr *attr)
994bb0c70bcSShannon Zhao {
995bb0c70bcSShannon Zhao 	int ret;
996bb0c70bcSShannon Zhao 
997bb0c70bcSShannon Zhao 	switch (attr->group) {
998bb0c70bcSShannon Zhao 	case KVM_ARM_VCPU_PMU_V3_CTRL:
999bb0c70bcSShannon Zhao 		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
1000bb0c70bcSShannon Zhao 		break;
100199a1db7aSChristoffer Dall 	case KVM_ARM_VCPU_TIMER_CTRL:
100299a1db7aSChristoffer Dall 		ret = kvm_arm_timer_has_attr(vcpu, attr);
100399a1db7aSChristoffer Dall 		break;
100458772e9aSSteven Price 	case KVM_ARM_VCPU_PVTIME_CTRL:
100558772e9aSSteven Price 		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
100658772e9aSSteven Price 		break;
1007bb0c70bcSShannon Zhao 	default:
1008bb0c70bcSShannon Zhao 		ret = -ENXIO;
1009bb0c70bcSShannon Zhao 		break;
1010bb0c70bcSShannon Zhao 	}
1011bb0c70bcSShannon Zhao 
1012bb0c70bcSShannon Zhao 	return ret;
1013bb0c70bcSShannon Zhao }
1014f0376edbSSteven Price 
/*
 * KVM_ARM_MTE_COPY_TAGS: copy MTE allocation tags between userspace and
 * guest memory, one page at a time.
 *
 * Direction is set by KVM_ARM_TAGS_FROM_GUEST in @copy_tags->flags:
 * absent means userspace -> guest ("write" below), present means
 * guest -> userspace.  @guest_ipa and @length must be page aligned.
 *
 * Return: the number of bytes whose tags were transferred if any data
 * was copied (possibly short of @length), otherwise 0 or a negative
 * error code.
 */
int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags)
{
	gpa_t guest_ipa = copy_tags->guest_ipa;
	size_t length = copy_tags->length;
	void __user *tags = copy_tags->addr;
	gpa_t gfn;
	bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
	int ret = 0;

	if (!kvm_has_mte(kvm))
		return -EINVAL;

	/* Reserved fields must be zero for future extensibility. */
	if (copy_tags->reserved[0] || copy_tags->reserved[1])
		return -EINVAL;

	if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
		return -EINVAL;

	/* Both the IPA and the length must be page aligned. */
	if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
		return -EINVAL;

	/* Lengths above INT_MAX cannot be represented in the return value */
	if (length > INT_MAX)
		return -EINVAL;

	gfn = gpa_to_gfn(guest_ipa);

	/* Hold slots_lock so the gfn->pfn translations stay stable. */
	mutex_lock(&kvm->slots_lock);

	while (length > 0) {
		kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
		void *maddr;
		unsigned long num_tags;
		struct page *page;

		if (is_error_noslot_pfn(pfn)) {
			ret = -EFAULT;
			goto out;
		}

		page = pfn_to_online_page(pfn);
		if (!page) {
			/* Reject ZONE_DEVICE memory */
			ret = -EFAULT;
			goto out;
		}
		maddr = page_address(page);

		if (!write) {
			/* Guest -> userspace: read the page's tags, if any. */
			if (page_mte_tagged(page))
				num_tags = mte_copy_tags_to_user(tags, maddr,
							MTE_GRANULES_PER_PAGE);
			else
				/* No tags in memory, so write zeros */
				num_tags = MTE_GRANULES_PER_PAGE -
					clear_user(tags, MTE_GRANULES_PER_PAGE);
			kvm_release_pfn_clean(pfn);
		} else {
			/*
			 * Only locking to serialise with a concurrent
			 * set_pte_at() in the VMM but still overriding the
			 * tags, hence ignoring the return value.
			 */
			try_page_mte_tagging(page);
			num_tags = mte_copy_tags_from_user(maddr, tags,
							MTE_GRANULES_PER_PAGE);

			/* uaccess failed, don't leave stale tags */
			if (num_tags != MTE_GRANULES_PER_PAGE)
				mte_clear_page_tags(maddr);
			set_page_mte_tagged(page);

			kvm_release_pfn_dirty(pfn);
		}

		/* A short tag copy means the uaccess faulted: stop here. */
		if (num_tags != MTE_GRANULES_PER_PAGE) {
			ret = -EFAULT;
			goto out;
		}

		gfn++;
		tags += num_tags;
		length -= PAGE_SIZE;
	}

out:
	mutex_unlock(&kvm->slots_lock);
	/* If some data has been copied report the number of bytes copied */
	if (length != copy_tags->length)
		return copy_tags->length - length;
	return ret;
}
1108