// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

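/*
 * On guest exit, stash the in-hardware List Register state into the
 * shadow copy in cpu_if. ICH_ELRSR_EL2 has a bit set for each LR that
 * no longer holds a live interrupt; for those only the cached state
 * bits are cleared, the others are read back in full. All LRs are then
 * zeroed and the vGIC is disabled by clearing ICH_HCR_EL2.EN.
 */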
void __hyp_text __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;

	/*
	 * Make sure stores to the GIC via the memory-mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

void __hyp_text __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __hyp_text __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * actually been programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

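/*
 * Undo __vgic_v3_activate_traps() on the way back to the host: save
 * ICH_VMCR_EL2 for guests running without SRE (GICv2 emulation),
 * re-enable ICC_SRE_EL2.Enable and, if it had been cleared, SRE at
 * EL1, and turn the vGIC off again when it had only been enabled for
 * trapping purposes.
 */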
void __hyp_text __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		/* Fall through */
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		/* Fall through */
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		/* Fall through */
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		/* Fall through */
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __hyp_text __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		/* Fall through */
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		/* Fall through */
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		/* Fall through */
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		/* Fall through */
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

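/*
 * Work out which interrupt group a trapped ICC_* access targets. The
 * Group-0 registers handled here (ICC_IAR0_EL1, ICC_EOIR0_EL1,
 * ICC_HPPIR0_EL1 and ICC_AP0Rn_EL1) are all encoded with CRm == 8,
 * so any other CRm value is treated as a Group-1 access.
 */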
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

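/*
 * For example, with a Group-1 BPR of 3, bits [2:0] of the priority are
 * sub-priority: __vgic_v3_pri_to_pre(0x9d, vmcr, 1) masks them off and
 * returns 0x98.
 */
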
/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

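/*
 * Emulate a guest write to ICC_DIR_EL1. This only has an effect when
 * EOImode == 1, i.e. when priority drop (EOIR) and deactivation (DIR)
 * are split, and LPIs have no active state to deactivate. When no
 * matching active LR is found, ICH_HCR_EL2.EOIcount is incremented
 * instead.
 */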
static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

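/*
 * The ICC_AP0Rn_EL1 and ICC_AP1Rn_EL1 traps share the handlers below;
 * the group is recovered from the trapped encoding via
 * __vgic_v3_get_group(), so the same code serves both register banks.
 */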
static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

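/*
 * Only the CBPR and EOImode fields of the emulated ICC_CTLR_EL1 are
 * writable; the other fields reported by __vgic_v3_read_ctlr() are
 * read-only properties derived from ICH_VTR_EL2.
 */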
static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}