/*
 * VGIC system registers handling functions for AArch64 mode
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic.h"
#include "sys_regs.h"

static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;
	u64 val;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		val = p->regval;

		/*
		 * Disallow restoring VM state if it is not supported
		 * by this hardware.
		 */
		host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
				 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
		if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
			return false;

		vgic_v3_cpu->num_pri_bits = host_pri_bits;

		host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
			       ICC_CTLR_EL1_ID_BITS_SHIFT;
		if (host_id_bits > vgic_v3_cpu->num_id_bits)
			return false;

		vgic_v3_cpu->num_id_bits = host_id_bits;

		host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
			      ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
		seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
		       ICC_CTLR_EL1_SEIS_SHIFT;
		if (host_seis != seis)
			return false;

		host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
			     ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
		a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
		if (host_a3v != a3v)
			return false;

		/*
		 * Set VMCR.CTLR in the ICC_CTLR_EL1 layout;
		 * vgic_set_vmcr() converts it to the ICH_VMCR layout.
		 */
		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		val = 0;
		val |= (vgic_v3_cpu->num_pri_bits - 1) <<
		       ICC_CTLR_EL1_PRI_BITS_SHIFT;
		val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
			 ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
		       ICC_CTLR_EL1_SEIS_SHIFT;
		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
			 ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
		       ICC_CTLR_EL1_A3V_SHIFT;
		/*
		 * The VMCR.CTLR value is in the ICC_CTLR_EL1 layout,
		 * so extract it directly with the ICC_CTLR_EL1 masks.
		 */
		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;

		p->regval = val;
	}

	return true;
}
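/*
 * Each handler below follows the same shape: snapshot the virtual CPU
 * interface state with vgic_get_vmcr(), then either fold the written
 * value into the relevant VMCR field (writes) or extract that field
 * into p->regval (reads). Returning false flags an unsupported access,
 * which vgic_v3_cpu_sysregs_uaccess() at the bottom of this file turns
 * into -EINVAL.
 */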
static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
	}

	return true;
}

static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
			   ICC_BPR0_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
			    ICC_BPR0_EL1_MASK;
	}

	return true;
}

static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	if (!p->is_write)
		p->regval = 0;

	vgic_get_vmcr(vcpu, &vmcr);
	/*
	 * vmcr.ctlr is in the ICC_CTLR_EL1 layout (see access_gic_ctlr()),
	 * so test the CBPR bit with the matching ICC_CTLR_EL1 mask.
	 */
	if (!((vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT)) {
		if (p->is_write) {
			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
				    ICC_BPR1_EL1_SHIFT;
			vgic_set_vmcr(vcpu, &vmcr);
		} else {
			p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
				    ICC_BPR1_EL1_MASK;
		}
	} else {
		/* With CBPR set, BPR1 reads as BPR0 + 1, saturated at 7. */
		if (!p->is_write)
			p->regval = min((vmcr.bpr + 1), 7U);
	}

	return true;
}

static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
			      ICC_IGRPEN0_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
			    ICC_IGRPEN0_EL1_MASK;
	}

	return true;
}

static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
			      ICC_IGRPEN1_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
			    ICC_IGRPEN1_EL1_MASK;
	}

	return true;
}

static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 *ap_reg;

	if (apr)
		ap_reg = &vgicv3->vgic_ap1r[idx];
	else
		ap_reg = &vgicv3->vgic_ap0r[idx];

	if (p->is_write)
		*ap_reg = p->regval;
	else
		p->regval = *ap_reg;
}
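/*
 * The number of implemented AP0Rn/AP1Rn registers depends on the number
 * of priority bits the HW implements: each 32-bit register tracks 32
 * group priorities, so 5 priority bits need a single register, 6 bits
 * need two, and 7 bits need all four. The switch below enforces this.
 */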
static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r, u8 apr)
{
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	u8 idx = r->Op2 & 3;

	/*
	 * num_pri_bits is initialized with the HW-supported value, so it
	 * is safe to rely on it even if the VM has not restored
	 * ICC_CTLR_EL1 before restoring the APnR registers.
	 */
	switch (vgic_v3_cpu->num_pri_bits) {
	case 7:
		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
		break;
	case 6:
		if (idx > 1)
			goto err;
		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
		break;
	default:
		if (idx > 0)
			goto err;
		vgic_v3_access_apr_reg(vcpu, p, apr, idx);
	}

	return true;
err:
	if (!p->is_write)
		p->regval = 0;

	return false;
}

static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return access_gic_aprn(vcpu, p, r, 0);
}

static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return access_gic_aprn(vcpu, p, r, 1);
}

static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/* Validate SRE bit */
	if (p->is_write) {
		if (!(p->regval & ICC_SRE_EL1_SRE))
			return false;
	} else {
		p->regval = vgicv3->vgic_sre;
	}

	return true;
}

static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	/* ICC_PMR_EL1 */
	{ Op0(3), Op1(0), CRn(4), CRm(6), Op2(0), access_gic_pmr },
	/* ICC_BPR0_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(8), Op2(3), access_gic_bpr0 },
	/* ICC_AP0R0_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(8), Op2(4), access_gic_ap0r },
	/* ICC_AP0R1_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(8), Op2(5), access_gic_ap0r },
	/* ICC_AP0R2_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(8), Op2(6), access_gic_ap0r },
	/* ICC_AP0R3_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(8), Op2(7), access_gic_ap0r },
	/* ICC_AP1R0_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(9), Op2(0), access_gic_ap1r },
	/* ICC_AP1R1_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(9), Op2(1), access_gic_ap1r },
	/* ICC_AP1R2_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(9), Op2(2), access_gic_ap1r },
	/* ICC_AP1R3_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(9), Op2(3), access_gic_ap1r },
	/* ICC_BPR1_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(12), Op2(3), access_gic_bpr1 },
	/* ICC_CTLR_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(12), Op2(4), access_gic_ctlr },
	/* ICC_SRE_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(12), Op2(5), access_gic_sre },
	/* ICC_IGRPEN0_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(12), Op2(6), access_gic_grpen0 },
	/* ICC_IGRPEN1_EL1 */
	{ Op0(3), Op1(0), CRn(12), CRm(12), Op2(7), access_gic_grpen1 },
};

int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
				 u64 *reg)
{
	struct sys_reg_params params;
	u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;

	params.regval = *reg;
	params.is_write = is_write;
	params.is_aarch32 = false;
	params.is_32bit = false;

	if (find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
			   ARRAY_SIZE(gic_v3_icc_reg_descs)))
		return 0;

	return -ENXIO;
}
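/*
 * vgic_v3_has_cpu_sysregs_attr() above only probes whether a register
 * with the given encoding exists; the function below additionally runs
 * its access handler and, on reads, copies the result back for
 * userspace.
 */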
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
				u64 *reg)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;

	if (is_write)
		params.regval = *reg;
	params.is_write = is_write;
	params.is_aarch32 = false;
	params.is_32bit = false;

	r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
			   ARRAY_SIZE(gic_v3_icc_reg_descs));
	if (!r)
		return -ENXIO;

	if (!r->access(vcpu, &params, r))
		return -EINVAL;

	if (!is_write)
		*reg = params.regval;

	return 0;
}
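/*
 * For reference, userspace reaches the two entry points above through
 * the KVM device attribute API. A minimal sketch of a read, assuming a
 * GICv3 device fd from KVM_CREATE_DEVICE and an attr value encoding the
 * target vCPU MPIDR plus the sysreg number (gic_fd and
 * mpidr_sysreg_attr are illustrative names, not part of this file):
 *
 *	u64 val;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
 *		.attr  = mpidr_sysreg_attr,
 *		.addr  = (u64)&val,
 *	};
 *
 *	if (ioctl(gic_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		err(1, "KVM_GET_DEVICE_ATTR");
 */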