// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC system registers handling functions for AArch64 mode
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic/vgic.h"
#include "sys_regs.h"

static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	/*
	 * Disallow restoring VM state if not supported by this
	 * hardware.
	 */
	host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
			 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
	if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
		return -EINVAL;

	vgic_v3_cpu->num_pri_bits = host_pri_bits;

	host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
		       ICC_CTLR_EL1_ID_BITS_SHIFT;
	if (host_id_bits > vgic_v3_cpu->num_id_bits)
		return -EINVAL;

	vgic_v3_cpu->num_id_bits = host_id_bits;

	host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
		      ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
	seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
	       ICC_CTLR_EL1_SEIS_SHIFT;
	if (host_seis != seis)
		return -EINVAL;

	host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
		     ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
	a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
	if (host_a3v != a3v)
		return -EINVAL;

	/*
	 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
	 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
	 */
	vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
	vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *valp)
{
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;
	u64 val;

	vgic_get_vmcr(vcpu, &vmcr);
	val = 0;
	val |= (vgic_v3_cpu->num_pri_bits - 1) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
	val |= ((kvm_vgic_global_state.ich_vtr_el2 &
		 ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
		ICC_CTLR_EL1_SEIS_SHIFT;
	val |= ((kvm_vgic_global_state.ich_vtr_el2 &
		 ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
		ICC_CTLR_EL1_A3V_SHIFT;
	/*
	 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
	 * Extract it directly using ICC_CTLR_EL1 reg definitions.
	 */
	val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
	val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;

	*valp = val;

	return 0;
}
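/*
 * The PMR/BPR/IGRPEN accessors below all follow the same pattern: fetch
 * the cached VMCR, move one field between its architectural ICC_*_EL1
 * layout and the vgic_vmcr bookkeeping, and (for writes) push the VMCR
 * back.  Userspace reaches these handlers via KVM_{GET,SET}_DEVICE_ATTR
 * on the VGIC device fd; as a rough, non-authoritative sketch, a VM
 * save/restore tool might do something like:
 *
 *	u64 reg_val = ...;	// register value in ICC_*_EL1 layout
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
 *		.attr  = <op0/op1/CRn/CRm/op2 encoding of the register>,
 *		.addr  = (__u64)(unsigned long)&reg_val,
 *	};
 *	ioctl(vgic_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
 */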
static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.pmr = (val & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;

	return 0;
}

static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.bpr = (val & ICC_BPR0_EL1_MASK) >> ICC_BPR0_EL1_SHIFT;
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) & ICC_BPR0_EL1_MASK;

	return 0;
}

static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr) {
		vmcr.abpr = (val & ICC_BPR1_EL1_MASK) >> ICC_BPR1_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	}

	return 0;
}

static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr)
		*val = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) & ICC_BPR1_EL1_MASK;
	else
		*val = min((vmcr.bpr + 1), 7U);

	return 0;
}

static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen0 = (val & ICC_IGRPEN0_EL1_MASK) >> ICC_IGRPEN0_EL1_SHIFT;
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) & ICC_IGRPEN0_EL1_MASK;

	return 0;
}

static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen1 = (val & ICC_IGRPEN1_EL1_MASK) >> ICC_IGRPEN1_EL1_SHIFT;
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) & ICC_IGRPEN1_EL1_MASK;

	return 0;
}

static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		vgicv3->vgic_ap1r[idx] = val;
	else
		vgicv3->vgic_ap0r[idx] = val;
}

static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		return vgicv3->vgic_ap1r[idx];
	else
		return vgicv3->vgic_ap0r[idx];
}
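/*
 * The four registers in each active-priority bank (AP0R0..3, AP1R0..3)
 * occupy consecutive Op2 encodings, so "r->Op2 & 3" recovers the
 * register index n.  The index is bounded by vgic_v3_max_apr_idx(),
 * which derives the number of implemented APRs from the
 * hardware-provided num_pri_bits (7 priority bits -> APR0..3,
 * 6 -> APR0..1, otherwise APR0 only).
 */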
static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 0, idx);
	return 0;
}

static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 0, idx);

	return 0;
}

static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 1, idx);
	return 0;
}

static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 1, idx);

	return 0;
}

static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	/* Validate SRE bit */
	if (!(val & ICC_SRE_EL1_SRE))
		return -EINVAL;

	return 0;
}

static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	*val = vgicv3->vgic_sre;

	return 0;
}

static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1),
	  .set_user = set_gic_pmr, .get_user = get_gic_pmr, },
	{ SYS_DESC(SYS_ICC_BPR0_EL1),
	  .set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_BPR1_EL1),
	  .set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
	{ SYS_DESC(SYS_ICC_CTLR_EL1),
	  .set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
	{ SYS_DESC(SYS_ICC_SRE_EL1),
	  .set_user = set_gic_sre, .get_user = get_gic_sre, },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1),
	  .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1),
	  .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
};

static u64 attr_to_id(u64 attr)
{
	return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP1_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRN_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRM_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP2_MASK, attr));
}

int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	if (get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
			  ARRAY_SIZE(gic_v3_icc_reg_descs)))
		return 0;

	return -ENXIO;
}
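/*
 * get_reg_by_id() does a binary search, so gic_v3_icc_reg_descs above
 * must stay sorted by encoding.  The uaccess helper below simply
 * repackages the device attribute as a kvm_one_reg and hands it to the
 * common sys_regs user-access machinery, which looks up the descriptor
 * and invokes the matching set_user/get_user hook.
 */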
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
				struct kvm_device_attr *attr,
				bool is_write)
{
	struct kvm_one_reg reg = {
		.id = attr_to_id(attr->attr),
		.addr = attr->addr,
	};

	if (is_write)
		return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));
	else
		return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));
}