// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */

int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
                       phys_addr_t addr, phys_addr_t alignment,
                       phys_addr_t size)
{
        if (!IS_VGIC_ADDR_UNDEF(ioaddr))
                return -EEXIST;

        if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
                return -EINVAL;

        if (addr + size < addr)
                return -EINVAL;

        if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
                return -E2BIG;

        return 0;
}

static int vgic_check_type(struct kvm *kvm, int type_needed)
{
        if (kvm->arch.vgic.vgic_model != type_needed)
                return -ENODEV;
        else
                return 0;
}

int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
        struct vgic_dist *vgic = &kvm->arch.vgic;
        int r;

        mutex_lock(&kvm->arch.config_lock);
        switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                if (!r)
                        r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
                                               SZ_4K, KVM_VGIC_V2_DIST_SIZE);
                if (!r)
                        vgic->vgic_dist_base = dev_addr->addr;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                if (!r)
                        r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
                                               SZ_4K, KVM_VGIC_V2_CPU_SIZE);
                if (!r)
                        vgic->vgic_cpu_base = dev_addr->addr;
                break;
        default:
                r = -ENODEV;
        }

        mutex_unlock(&kvm->arch.config_lock);

        return r;
}
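/*
 * Illustrative only: a minimal sketch of the userspace side of the legacy
 * path above.  It assumes the usual arm64 uapi encoding of
 * struct kvm_arm_device_addr, where the id field carries the device id in
 * its upper half and the address type (the field extracted above with
 * KVM_ARM_DEVICE_TYPE_MASK) in its lower half.  KVM_ARM_DEVICE_VGIC_V2,
 * KVM_ARM_DEVICE_ID_SHIFT and the example base address are assumptions taken
 * from the uapi headers, not from this file.
 *
 *        struct kvm_arm_device_addr dev_addr = {
 *                .id   = ((__u64)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
 *                        KVM_VGIC_V2_ADDR_TYPE_DIST,
 *                .addr = 0x8000000ULL,
 *        };
 *
 *        ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev_addr);
 */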
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @attr:  pointer to the attribute being retrieved/updated
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
        u64 __user *uaddr = (u64 __user *)attr->addr;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t *addr_ptr, alignment, size;
        u64 undef_value = VGIC_ADDR_UNDEF;
        u64 addr;
        int r;

        /* Reading a redistributor region addr implies getting the index */
        if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
                if (get_user(addr, uaddr))
                        return -EFAULT;

        mutex_lock(&kvm->arch.config_lock);
        switch (attr->attr) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_4K;
                size = KVM_VGIC_V2_DIST_SIZE;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_cpu_base;
                alignment = SZ_4K;
                size = KVM_VGIC_V2_CPU_SIZE;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_64K;
                size = KVM_VGIC_V3_DIST_SIZE;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
                struct vgic_redist_region *rdreg;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;
                if (write) {
                        r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
                        goto out;
                }
                rdreg = list_first_entry_or_null(&vgic->rd_regions,
                                                 struct vgic_redist_region, list);
                if (!rdreg)
                        addr_ptr = &undef_value;
                else
                        addr_ptr = &rdreg->base;
                break;
        }
        case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
        {
                struct vgic_redist_region *rdreg;
                u8 index;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;

                index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

                if (write) {
                        gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
                        u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
                        u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

                        if (!count || flags)
                                r = -EINVAL;
                        else
                                r = vgic_v3_set_redist_base(kvm, index,
                                                            base, count);
                        goto out;
                }

                rdreg = vgic_v3_rdist_region_from_index(kvm, index);
                if (!rdreg) {
                        r = -ENOENT;
                        goto out;
                }

                addr = index;
                addr |= rdreg->base;
                addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
                goto out;
        }
        default:
                r = -ENODEV;
        }

        if (r)
                goto out;

        if (write) {
                r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
                if (!r)
                        *addr_ptr = addr;
        } else {
                addr = *addr_ptr;
        }

out:
        mutex_unlock(&kvm->arch.config_lock);

        if (!r && !write)
                r = put_user(addr, uaddr);

        return r;
}
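/*
 * Illustrative only: how userspace might pack the 64-bit value consumed by
 * the KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION case above.  The layout mirrors
 * the FIELD_GET() calls in kvm_vgic_addr(): vcpu count in the COUNT field,
 * a 64K-aligned base address in the BASE field, flags zero, and the region
 * index in the low bits (index 0 here).  The base address and count are
 * made-up example values.
 *
 *        __u64 region = ((__u64)8 << KVM_VGIC_V3_RDIST_COUNT_SHIFT) |
 *                       (0x80a0000ULL & KVM_VGIC_V3_RDIST_BASE_MASK) |
 *                       0;
 *
 * The value is then written with KVM_SET_DEVICE_ATTR, using group
 * KVM_DEV_ARM_VGIC_GRP_ADDR, attr KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION and
 * kvm_device_attr.addr pointing at it.
 */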
static int vgic_set_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                r = kvm_vgic_addr(dev->kvm, attr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 val;
                int ret = 0;

                if (get_user(val, uaddr))
                        return -EFAULT;

                /*
                 * We require:
                 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
                 * - at most 1024 interrupts
                 * - a multiple of 32 interrupts
                 */
                if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
                    val > VGIC_MAX_RESERVED ||
                    (val & 31))
                        return -EINVAL;

                mutex_lock(&dev->kvm->arch.config_lock);

                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_spis =
                                val - VGIC_NR_PRIVATE_IRQS;

                mutex_unlock(&dev->kvm->arch.config_lock);

                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        mutex_lock(&dev->kvm->arch.config_lock);
                        r = vgic_init(dev->kvm);
                        mutex_unlock(&dev->kvm->arch.config_lock);
                        return r;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        /*
                         * OK, this one isn't common at all, but we
                         * want to handle all control group attributes
                         * in a single place.
                         */
                        if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
                                return -ENXIO;
                        mutex_lock(&dev->kvm->lock);

                        if (!lock_all_vcpus(dev->kvm)) {
                                mutex_unlock(&dev->kvm->lock);
                                return -EBUSY;
                        }

                        mutex_lock(&dev->kvm->arch.config_lock);
                        r = vgic_v3_save_pending_tables(dev->kvm);
                        mutex_unlock(&dev->kvm->arch.config_lock);
                        unlock_all_vcpus(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                }
                break;
        }
        }

        return -ENXIO;
}

static int vgic_get_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                r = kvm_vgic_addr(dev->kvm, attr, false);
                return (r == -ENODEV) ? -ENXIO : r;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;

                r = put_user(dev->kvm->arch.vgic.nr_spis +
                             VGIC_NR_PRIVATE_IRQS, uaddr);
                break;
        }
        }

        return r;
}
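/*
 * Illustrative only: a sketch of driving the common attribute groups handled
 * above from userspace, assuming the generic KVM device ioctls.  The value
 * 160 is an arbitrary example that satisfies the checks in
 * vgic_set_common_attr(): at least VGIC_NR_PRIVATE_IRQS + 32, at most 1024,
 * and a multiple of 32.
 *
 *        __u32 nr_irqs = 160;
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *                .attr  = 0,
 *                .addr  = (__u64)(unsigned long)&nr_irqs,
 *        };
 *
 *        ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Reading the same group back with KVM_GET_DEVICE_ATTR lands in
 * vgic_get_common_attr() and returns nr_spis + VGIC_NR_PRIVATE_IRQS.
 */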
static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
        int ret = -ENODEV;

        switch (type) {
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V2);
                break;
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V3);

                if (ret)
                        break;
                ret = kvm_vgic_register_its_device();
                break;
        }

        return ret;
}

int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        int cpuid;

        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
                return -EINVAL;

        reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    bool is_write)
{
        u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;
        u32 val;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        if (is_write)
                if (get_user(val, uaddr))
                        return -EFAULT;

        mutex_lock(&dev->kvm->lock);

        if (!lock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }

        mutex_lock(&dev->kvm->arch.config_lock);

        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
                break;
        default:
                ret = -EINVAL;
                break;
        }

out:
        mutex_unlock(&dev->kvm->arch.config_lock);
        unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);

        if (!ret && !is_write)
                ret = put_user(val, uaddr);

        return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_attr_regs_access(dev, attr, true);
        default:
                return vgic_set_common_attr(dev, attr);
        }
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_attr_regs_access(dev, attr, false);
        default:
                return vgic_get_common_attr(dev, attr);
        }
}
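/*
 * Illustrative only: the attr layout expected by the GICv2 register groups
 * dispatched above.  vgic_v2_parse_attr() splits attr into a vcpu index
 * (KVM_DEV_ARM_VGIC_CPUID_MASK / _SHIFT) and a byte offset into the emulated
 * distributor or CPU interface (KVM_DEV_ARM_VGIC_OFFSET_MASK).  The offset
 * 0x8 is only an example value.
 *
 *        __u64 attr_id = ((__u64)0 << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
 *                        (0x8 & KVM_DEV_ARM_VGIC_OFFSET_MASK);
 *
 * Used with group KVM_DEV_ARM_VGIC_GRP_DIST_REGS, this reads or writes the
 * 32-bit register at that distributor offset on behalf of vcpu 0.
 */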
static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        unsigned long vgic_mpidr, mpidr_reg;

        /*
         * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
         * attr might not hold MPIDR. Hence assume vcpu0.
         */
        if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
                vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
                             KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

                mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
                reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
        } else {
                reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
        }

        if (!reg_attr->vcpu)
                return -EINVAL;

        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}
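/*
 * Illustrative only: for the GICv3 groups other than DIST_REGS, the attr
 * value decoded above addresses the target vCPU by affinity rather than by
 * index.  A sketch, assuming "affinity" is the Aff3..Aff0 value of the target
 * vCPU's MPIDR packed as expected by VGIC_TO_MPIDR(), and "offset" is the
 * register offset within the frame:
 *
 *        __u64 attr_id = ((__u64)affinity << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) |
 *                        (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK);
 */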
/*
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        bool uaccess;
        u32 val;
        int ret;

        ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                /* Sysregs uaccess is performed by the sysreg handling code */
                uaccess = false;
                break;
        default:
                uaccess = true;
        }

        if (uaccess && is_write) {
                u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
                if (get_user(val, uaddr))
                        return -EFAULT;
        }

        mutex_lock(&dev->kvm->lock);

        if (!lock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }

        mutex_lock(&dev->kvm->arch.config_lock);

        if (unlikely(!vgic_initialized(dev->kvm))) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
                ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
                break;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                unsigned int info, intid;

                info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                        KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
                if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
                        intid = attr->attr &
                                KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
                        ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
                                                              intid, &val);
                } else {
                        ret = -EINVAL;
                }
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }

out:
        mutex_unlock(&dev->kvm->arch.config_lock);
        unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);

        if (!ret && uaccess && !is_write) {
                u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
                ret = put_user(val, uaddr);
        }

        return ret;
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
                return vgic_v3_attr_regs_access(dev, attr, true);
        default:
                return vgic_set_common_attr(dev, attr);
        }
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
                return vgic_v3_attr_regs_access(dev, attr, false);
        default:
                return vgic_get_common_attr(dev, attr);
        }
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                return vgic_v3_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
                      VGIC_LEVEL_INFO_LINE_LEVEL)
                        return 0;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};
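/*
 * Illustrative only: the usual userspace sequence that exercises the device
 * ops registered above, assuming the generic KVM device ioctls
 * (struct kvm_create_device, KVM_CREATE_DEVICE, KVM_SET_DEVICE_ATTR).
 * Error handling is omitted.
 *
 *        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V3 };
 *
 *        ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *        // cd.fd is now the vgic device fd:
 *        //  1. set distributor/redistributor bases (KVM_DEV_ARM_VGIC_GRP_ADDR)
 *        //  2. optionally set KVM_DEV_ARM_VGIC_GRP_NR_IRQS
 *        //  3. write KVM_DEV_ARM_VGIC_CTRL_INIT in KVM_DEV_ARM_VGIC_GRP_CTRL,
 *        //     which reaches vgic_init() via vgic_set_common_attr()
 */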