1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * VGIC: KVM DEVICE API
4  *
5  * Copyright (C) 2015 ARM Ltd.
6  * Author: Marc Zyngier <marc.zyngier@arm.com>
7  */
8 #include <linux/kvm_host.h>
9 #include <kvm/arm_vgic.h>
10 #include <linux/uaccess.h>
11 #include <asm/kvm_mmu.h>
12 #include <asm/cputype.h>
13 #include "vgic.h"
14 
15 /* common helpers */
16 
17 int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
18 		      phys_addr_t addr, phys_addr_t alignment)
19 {
20 	if (addr & ~kvm_phys_mask(kvm))
21 		return -E2BIG;
22 
23 	if (!IS_ALIGNED(addr, alignment))
24 		return -EINVAL;
25 
26 	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
27 		return -EEXIST;
28 
29 	return 0;
30 }
31 
32 static int vgic_check_type(struct kvm *kvm, int type_needed)
33 {
34 	if (kvm->arch.vgic.vgic_model != type_needed)
35 		return -ENODEV;
36 	else
37 		return 0;
38 }
39 
40 /**
41  * kvm_vgic_addr - set or get vgic VM base addresses
42  * @kvm:   pointer to the vm struct
43  * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
44  * @addr:  pointer to address value
45  * @write: if true set the address in the VM address space, if false read the
46  *          address
47  *
48  * Set or get the vgic base addresses for the distributor and the virtual CPU
49  * interface in the VM physical address space.  These addresses are properties
50  * of the emulated core/SoC and therefore user space initially knows this
51  * information.
52  * Check them for sanity (alignment, double assignment). We can't check for
53  * overlapping regions in case of a virtual GICv3 here, since we don't know
54  * the number of VCPUs yet, so we defer this check to map_resources().
55  */
56 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
57 {
58 	int r = 0;
59 	struct vgic_dist *vgic = &kvm->arch.vgic;
60 	phys_addr_t *addr_ptr, alignment;
61 	u64 undef_value = VGIC_ADDR_UNDEF;
62 
63 	mutex_lock(&kvm->lock);
64 	switch (type) {
65 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
66 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
67 		addr_ptr = &vgic->vgic_dist_base;
68 		alignment = SZ_4K;
69 		break;
70 	case KVM_VGIC_V2_ADDR_TYPE_CPU:
71 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
72 		addr_ptr = &vgic->vgic_cpu_base;
73 		alignment = SZ_4K;
74 		break;
75 	case KVM_VGIC_V3_ADDR_TYPE_DIST:
76 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
77 		addr_ptr = &vgic->vgic_dist_base;
78 		alignment = SZ_64K;
79 		break;
80 	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
81 		struct vgic_redist_region *rdreg;
82 
83 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
84 		if (r)
85 			break;
86 		if (write) {
87 			r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
88 			goto out;
89 		}
90 		rdreg = list_first_entry(&vgic->rd_regions,
91 					 struct vgic_redist_region, list);
92 		if (!rdreg)
93 			addr_ptr = &undef_value;
94 		else
95 			addr_ptr = &rdreg->base;
96 		break;
97 	}
98 	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
99 	{
100 		struct vgic_redist_region *rdreg;
101 		u8 index;
102 
103 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
104 		if (r)
105 			break;
106 
107 		index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;
108 
109 		if (write) {
110 			gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
111 			u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
112 					>> KVM_VGIC_V3_RDIST_COUNT_SHIFT;
113 			u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
114 					>> KVM_VGIC_V3_RDIST_FLAGS_SHIFT;
115 
116 			if (!count || flags)
117 				r = -EINVAL;
118 			else
119 				r = vgic_v3_set_redist_base(kvm, index,
120 							    base, count);
121 			goto out;
122 		}
123 
124 		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
125 		if (!rdreg) {
126 			r = -ENOENT;
127 			goto out;
128 		}
129 
130 		*addr = index;
131 		*addr |= rdreg->base;
132 		*addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
133 		goto out;
134 	}
135 	default:
136 		r = -ENODEV;
137 	}
138 
139 	if (r)
140 		goto out;
141 
142 	if (write) {
143 		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
144 		if (!r)
145 			*addr_ptr = *addr;
146 	} else {
147 		*addr = *addr_ptr;
148 	}
149 
150 out:
151 	mutex_unlock(&kvm->lock);
152 	return r;
153 }
154 
/*
 * Handle the KVM device attribute groups shared between the GICv2 and
 * GICv3 userspace devices: base addresses, the number of IRQs, and the
 * INIT control.  Returns -ENXIO for any group/attr not handled here so
 * that the per-version set_attr callers can try their own groups next.
 */
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		/* The device API reports "wrong GIC model" as -ENXIO. */
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		/* nr_spis can only be set once, and only before vgic init. */
		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
217 
/*
 * Read side of the attribute groups shared between GICv2 and GICv3:
 * base addresses and the total number of IRQs.  Returns -ENXIO for
 * unhandled groups so the per-version get_attr callers can take over.
 */
static int vgic_get_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			/* The device API reports "wrong GIC model" as -ENXIO. */
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		/* Userspace sees the total IRQ count, including SGIs/PPIs. */
		r = put_user(dev->kvm->arch.vgic.nr_spis +
			     VGIC_NR_PRIVATE_IRQS, uaddr);
		break;
	}
	}

	return r;
}
248 
/* KVM device .create hook: instantiate the in-kernel vgic for this VM. */
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
253 
/*
 * KVM device .destroy hook: only the device wrapper is freed here; the
 * vgic state itself lives in struct kvm and is torn down with the VM.
 */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
258 
259 int kvm_register_vgic_device(unsigned long type)
260 {
261 	int ret = -ENODEV;
262 
263 	switch (type) {
264 	case KVM_DEV_TYPE_ARM_VGIC_V2:
265 		ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
266 					      KVM_DEV_TYPE_ARM_VGIC_V2);
267 		break;
268 	case KVM_DEV_TYPE_ARM_VGIC_V3:
269 		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
270 					      KVM_DEV_TYPE_ARM_VGIC_V3);
271 
272 		if (ret)
273 			break;
274 		ret = kvm_vgic_register_its_device();
275 		break;
276 	}
277 
278 	return ret;
279 }
280 
281 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
282 		       struct vgic_reg_attr *reg_attr)
283 {
284 	int cpuid;
285 
286 	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
287 		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
288 
289 	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
290 		return -EINVAL;
291 
292 	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
293 	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
294 
295 	return 0;
296 }
297 
298 /* unlocks vcpus from @vcpu_lock_idx and smaller */
299 static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
300 {
301 	struct kvm_vcpu *tmp_vcpu;
302 
303 	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
304 		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
305 		mutex_unlock(&tmp_vcpu->mutex);
306 	}
307 }
308 
/* Release the vcpu mutexes taken by a successful lock_all_vcpus(). */
void unlock_all_vcpus(struct kvm *kvm)
{
	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}
313 
/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	int c;

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run and fiddle with the vgic state while we
	 * access it.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			/*
			 * Trylock failed (a vcpu is running): roll back the
			 * mutexes taken so far and report failure rather
			 * than blocking, to avoid deadlocking against a
			 * vcpu that itself waits on kvm->lock.
			 */
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}
335 
336 /**
337  * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
338  *
339  * @dev:      kvm device handle
340  * @attr:     kvm device attribute
341  * @reg:      address the value is read or written
342  * @is_write: true if userspace is writing a register
343  */
344 static int vgic_v2_attr_regs_access(struct kvm_device *dev,
345 				    struct kvm_device_attr *attr,
346 				    u32 *reg, bool is_write)
347 {
348 	struct vgic_reg_attr reg_attr;
349 	gpa_t addr;
350 	struct kvm_vcpu *vcpu;
351 	int ret;
352 
353 	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
354 	if (ret)
355 		return ret;
356 
357 	vcpu = reg_attr.vcpu;
358 	addr = reg_attr.addr;
359 
360 	mutex_lock(&dev->kvm->lock);
361 
362 	ret = vgic_init(dev->kvm);
363 	if (ret)
364 		goto out;
365 
366 	if (!lock_all_vcpus(dev->kvm)) {
367 		ret = -EBUSY;
368 		goto out;
369 	}
370 
371 	switch (attr->group) {
372 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
373 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
374 		break;
375 	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
376 		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
377 		break;
378 	default:
379 		ret = -EINVAL;
380 		break;
381 	}
382 
383 	unlock_all_vcpus(dev->kvm);
384 out:
385 	mutex_unlock(&dev->kvm->lock);
386 	return ret;
387 }
388 
/*
 * KVM device .set_attr hook for GICv2: try the common groups first,
 * then handle the GICv2-specific register groups.
 */
static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	/* -ENXIO means "not a common group", so fall through below. */
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_v2_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}
413 
/*
 * KVM device .get_attr hook for GICv2: try the common groups first,
 * then handle the GICv2-specific register groups.
 */
static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	/* -ENXIO means "not a common group", so fall through below. */
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}
438 
439 static int vgic_v2_has_attr(struct kvm_device *dev,
440 			    struct kvm_device_attr *attr)
441 {
442 	switch (attr->group) {
443 	case KVM_DEV_ARM_VGIC_GRP_ADDR:
444 		switch (attr->attr) {
445 		case KVM_VGIC_V2_ADDR_TYPE_DIST:
446 		case KVM_VGIC_V2_ADDR_TYPE_CPU:
447 			return 0;
448 		}
449 		break;
450 	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
451 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
452 		return vgic_v2_has_attr_regs(dev, attr);
453 	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
454 		return 0;
455 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
456 		switch (attr->attr) {
457 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
458 			return 0;
459 		}
460 	}
461 	return -ENXIO;
462 }
463 
/* KVM device ops table for the userspace-created GICv2 device. */
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};
472 
473 int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
474 		       struct vgic_reg_attr *reg_attr)
475 {
476 	unsigned long vgic_mpidr, mpidr_reg;
477 
478 	/*
479 	 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
480 	 * attr might not hold MPIDR. Hence assume vcpu0.
481 	 */
482 	if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
483 		vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
484 			      KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
485 
486 		mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
487 		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
488 	} else {
489 		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
490 	}
491 
492 	if (!reg_attr->vcpu)
493 		return -EINVAL;
494 
495 	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
496 
497 	return 0;
498 }
499 
500 /*
501  * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
502  *
503  * @dev:      kvm device handle
504  * @attr:     kvm device attribute
505  * @reg:      address the value is read or written
506  * @is_write: true if userspace is writing a register
507  */
508 static int vgic_v3_attr_regs_access(struct kvm_device *dev,
509 				    struct kvm_device_attr *attr,
510 				    u64 *reg, bool is_write)
511 {
512 	struct vgic_reg_attr reg_attr;
513 	gpa_t addr;
514 	struct kvm_vcpu *vcpu;
515 	int ret;
516 	u32 tmp32;
517 
518 	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
519 	if (ret)
520 		return ret;
521 
522 	vcpu = reg_attr.vcpu;
523 	addr = reg_attr.addr;
524 
525 	mutex_lock(&dev->kvm->lock);
526 
527 	if (unlikely(!vgic_initialized(dev->kvm))) {
528 		ret = -EBUSY;
529 		goto out;
530 	}
531 
532 	if (!lock_all_vcpus(dev->kvm)) {
533 		ret = -EBUSY;
534 		goto out;
535 	}
536 
537 	switch (attr->group) {
538 	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
539 		if (is_write)
540 			tmp32 = *reg;
541 
542 		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
543 		if (!is_write)
544 			*reg = tmp32;
545 		break;
546 	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
547 		if (is_write)
548 			tmp32 = *reg;
549 
550 		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
551 		if (!is_write)
552 			*reg = tmp32;
553 		break;
554 	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
555 		u64 regid;
556 
557 		regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
558 		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
559 						  regid, reg);
560 		break;
561 	}
562 	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
563 		unsigned int info, intid;
564 
565 		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
566 			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
567 		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
568 			intid = attr->attr &
569 				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
570 			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
571 							      intid, reg);
572 		} else {
573 			ret = -EINVAL;
574 		}
575 		break;
576 	}
577 	default:
578 		ret = -EINVAL;
579 		break;
580 	}
581 
582 	unlock_all_vcpus(dev->kvm);
583 out:
584 	mutex_unlock(&dev->kvm->lock);
585 	return ret;
586 }
587 
588 static int vgic_v3_set_attr(struct kvm_device *dev,
589 			    struct kvm_device_attr *attr)
590 {
591 	int ret;
592 
593 	ret = vgic_set_common_attr(dev, attr);
594 	if (ret != -ENXIO)
595 		return ret;
596 
597 	switch (attr->group) {
598 	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
599 	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
600 		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
601 		u32 tmp32;
602 		u64 reg;
603 
604 		if (get_user(tmp32, uaddr))
605 			return -EFAULT;
606 
607 		reg = tmp32;
608 		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
609 	}
610 	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
611 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
612 		u64 reg;
613 
614 		if (get_user(reg, uaddr))
615 			return -EFAULT;
616 
617 		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
618 	}
619 	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
620 		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
621 		u64 reg;
622 		u32 tmp32;
623 
624 		if (get_user(tmp32, uaddr))
625 			return -EFAULT;
626 
627 		reg = tmp32;
628 		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
629 	}
630 	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
631 		int ret;
632 
633 		switch (attr->attr) {
634 		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
635 			mutex_lock(&dev->kvm->lock);
636 
637 			if (!lock_all_vcpus(dev->kvm)) {
638 				mutex_unlock(&dev->kvm->lock);
639 				return -EBUSY;
640 			}
641 			ret = vgic_v3_save_pending_tables(dev->kvm);
642 			unlock_all_vcpus(dev->kvm);
643 			mutex_unlock(&dev->kvm->lock);
644 			return ret;
645 		}
646 		break;
647 	}
648 	}
649 	return -ENXIO;
650 }
651 
/*
 * KVM device .get_attr hook for GICv3: try the common groups first,
 * then handle the GICv3-specific register groups.
 */
static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	/* -ENXIO means "not a common group", so fall through below. */
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		/* MMIO registers are 32 bits wide on the userspace side. */
		tmp32 = reg;
		return put_user(tmp32, uaddr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		tmp32 = reg;
		return put_user(tmp32, uaddr);
	}
	}
	return -ENXIO;
}
697 
698 static int vgic_v3_has_attr(struct kvm_device *dev,
699 			    struct kvm_device_attr *attr)
700 {
701 	switch (attr->group) {
702 	case KVM_DEV_ARM_VGIC_GRP_ADDR:
703 		switch (attr->attr) {
704 		case KVM_VGIC_V3_ADDR_TYPE_DIST:
705 		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
706 		case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
707 			return 0;
708 		}
709 		break;
710 	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
711 	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
712 	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
713 		return vgic_v3_has_attr_regs(dev, attr);
714 	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
715 		return 0;
716 	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
717 		if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
718 		      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
719 		      VGIC_LEVEL_INFO_LINE_LEVEL)
720 			return 0;
721 		break;
722 	}
723 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
724 		switch (attr->attr) {
725 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
726 			return 0;
727 		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
728 			return 0;
729 		}
730 	}
731 	return -ENXIO;
732 }
733 
/* KVM device ops table for the userspace-created GICv3 device. */
struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};
742