vsie.c — side-by-side comparison of two revisions
Left:  vsie.c (9ee71f20cb8d90e156c0e00ff9949328f455b06b)
Right: vsie.c (67d49d52ae502eaea8858fbcb97e3c2891f78da9)

Chunks common to both revisions are shown once under a [both] heading; chunks
present in only one revision are marked [left only] or [right only], with the
line numbers of the respective file.
[both: lines 1-8]

// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */

--- 121 unchanged lines hidden ---

[both: lines 130-137]

        if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
                newflags |= cpuflags & CPUSTAT_IBS;
        if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
                newflags |= cpuflags & CPUSTAT_KSS;

        atomic_set(&scb_s->cpuflags, newflags);
        return 0;
}
[left only: lines 138-267]

/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
                        unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
{
        struct kvm_s390_apcb0 tmp;

        if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
                return -EFAULT;

        apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
        apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
        apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;

        return 0;

}

/**
 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original apcb in the guest2
 * @apcb_h: pointer to start of apcb in the guest1
 *
 * Returns 0 or -EFAULT on error reading the guest apcb
 */
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
                        unsigned long apcb_o, unsigned long *apcb_h)
{
        if (read_guest_real(vcpu, apcb_o, apcb_s,
                            sizeof(struct kvm_s390_apcb0)))
                return -EFAULT;

        bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));

        return 0;
}

/**
 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original guest apcb
 * @apcb_h: pointer to start of apcb in the host
 *
 * Returns 0 or -EFAULT on error reading the guest apcb
 */
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
                        unsigned long apcb_o,
                        unsigned long *apcb_h)
{
        if (read_guest_real(vcpu, apcb_o, apcb_s,
                            sizeof(struct kvm_s390_apcb1)))
                return -EFAULT;

        bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));

        return 0;
}

/**
 * setup_apcb - Create a shadow copy of the apcb.
 * @vcpu: pointer to the virtual CPU
 * @crycb_s: pointer to shadow crycb
 * @crycb_o: pointer to original guest crycb
 * @crycb_h: pointer to the host crycb
 * @fmt_o: format of the original guest crycb.
 * @fmt_h: format of the host crycb.
 *
 * Checks the compatibility between the guest and host crycb and calls the
 * appropriate copy function.
 *
 * Return 0 or an error number if the guest and host crycb are incompatible.
 */
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
                      const u32 crycb_o,
                      struct kvm_s390_crypto_cb *crycb_h,
                      int fmt_o, int fmt_h)
{
        struct kvm_s390_crypto_cb *crycb;

        crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;

        switch (fmt_o) {
        case CRYCB_FORMAT2:
                if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
                        return -EACCES;
                if (fmt_h != CRYCB_FORMAT2)
                        return -EINVAL;
                return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
                                    (unsigned long) &crycb->apcb1,
                                    (unsigned long *)&crycb_h->apcb1);
        case CRYCB_FORMAT1:
                switch (fmt_h) {
                case CRYCB_FORMAT2:
                        return setup_apcb10(vcpu, &crycb_s->apcb1,
                                            (unsigned long) &crycb->apcb0,
                                            &crycb_h->apcb1);
                case CRYCB_FORMAT1:
                        return setup_apcb00(vcpu,
                                            (unsigned long *) &crycb_s->apcb0,
                                            (unsigned long) &crycb->apcb0,
                                            (unsigned long *) &crycb_h->apcb0);
                }
                break;
        case CRYCB_FORMAT0:
                if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
                        return -EACCES;

                switch (fmt_h) {
                case CRYCB_FORMAT2:
                        return setup_apcb10(vcpu, &crycb_s->apcb1,
                                            (unsigned long) &crycb->apcb0,
                                            &crycb_h->apcb1);
                case CRYCB_FORMAT1:
                case CRYCB_FORMAT0:
                        return setup_apcb00(vcpu,
                                            (unsigned long *) &crycb_s->apcb0,
                                            (unsigned long) &crycb->apcb0,
                                            (unsigned long *) &crycb_h->apcb0);
                }
        }
        return -EINVAL;
}

/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *

[right only: lines 138-139]

/*
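The shadowing done by the setup_apcb* helpers above is a pure mask intersection: the shadow APCB can only enable an AP adapter or domain that both the guest-defined APCB and the host (guest-1) APCB enable, and setup_apcb10() additionally keeps only the leading 16 bits of aqm/adm (the domains a format-0 block can describe). Below is a minimal userspace sketch of that idea with plain uint64_t ANDs instead of the kernel's bitmap_and() (whose length argument is a number of bits); names and values here are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a FORMAT0 APCB has one 64-bit word per mask. */
struct apcb0_sketch {
        uint64_t apm;   /* adapter mask */
        uint64_t aqm;   /* usage-domain (queue) mask */
        uint64_t adm;   /* control-domain mask */
};

/* Shadow APCB = guest APCB AND host APCB, field by field. */
static void shadow_apcb0(struct apcb0_sketch *s,
                         const struct apcb0_sketch *guest,
                         const struct apcb0_sketch *host)
{
        s->apm = guest->apm & host->apm;
        s->aqm = guest->aqm & host->aqm;
        s->adm = guest->adm & host->adm;
}

int main(void)
{
        struct apcb0_sketch guest = { 0x00f0000000000000ULL,
                                      0xffff000000000000ULL,
                                      0xffff000000000000ULL };
        struct apcb0_sketch host  = { 0x0030000000000000ULL,
                                      0xff00000000000000ULL,
                                      0xffff000000000000ULL };
        struct apcb0_sketch shadow;

        shadow_apcb0(&shadow, &guest, &host);
        /* Only bits set in both inputs survive. */
        printf("apm %016llx aqm %016llx adm %016llx\n",
               (unsigned long long)shadow.apm,
               (unsigned long long)shadow.aqm,
               (unsigned long long)shadow.adm);
        return 0;
}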
[both: left 268-270 / right 140-142]

 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *

[left only: lines 271-274]

 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.

[right only: line 143]

 * We only accept format-1 (no AP in g2), but convert it into format-2

[both: left 275-276 / right 144-145]

 * There is nothing to do for format-0.
 *

[left only: lines 277-279]

 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *

[both: left 280-290 / right 146-156]

 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
        struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
        const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
        const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
        unsigned long *b1, *b2;
        u8 ecb3_flags;
[left only: lines 291-295]

        int apie_h;
        int key_msk = test_kvm_facility(vcpu->kvm, 76);
        int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
        int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
        int ret = 0;

[both: left 296-297 / right 157-158]

        scb_s->crycbd = 0;

[left only: lines 298-300]

        apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
        if (!apie_h && !key_msk)

[right only: line 159]

        if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))

[both: left 301 / right 160]

                return 0;

[left only: lines 302-319]

        if (!crycb_addr)
                return set_validity_icpt(scb_s, 0x0039U);

        if (fmt_o == CRYCB_FORMAT1)
                if ((crycb_addr & PAGE_MASK) !=
                    ((crycb_addr + 128) & PAGE_MASK))
                        return set_validity_icpt(scb_s, 0x003CU);

        if (apie_h && (scb_o->eca & ECA_APIE)) {
                ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
                                 vcpu->kvm->arch.crypto.crycb,
                                 fmt_o, fmt_h);
                if (ret)
                        goto end;
                scb_s->eca |= scb_o->eca & ECA_APIE;
        }

[right only: lines 161-163]

        /* format-1 is supported with message-security-assist extension 3 */
        if (!test_kvm_facility(vcpu->kvm, 76))
                return 0;
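The PAGE_MASK comparison above (used with 128 bytes here and with 256/32 bytes in setup_apcb()) rejects a CRYCB whose relevant portion does not sit entirely within one page: it compares the page of the origin with the page of origin + length. A small self-contained sketch of the same test, assuming 4 KiB pages as on s390; names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

/* Mirrors the source check: does [addr, addr + len] span two pages? */
static bool crosses_page(uint64_t addr, uint64_t len)
{
        return (addr & SKETCH_PAGE_MASK) != ((addr + len) & SKETCH_PAGE_MASK);
}

int main(void)
{
        /* 0x1fc0 + 128 = 0x2040: the region runs into the next page -> rejected */
        printf("%d\n", crosses_page(0x1fc0, 128));      /* prints 1 */
        /* 0x1e00 + 128 = 0x1e80: same page -> accepted */
        printf("%d\n", crosses_page(0x1e00, 128));      /* prints 0 */
        return 0;
}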
[both: left 320-323 / right 164-167]

        /* we may only allow it if enabled for guest 2 */
        ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
                     (ECB3_AES | ECB3_DEA);
        if (!ecb3_flags)

[left only: line 324]

                goto end;

[right only: line 168]

                return 0;

[right only: lines 170-174]

        if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
                return set_validity_icpt(scb_s, 0x003CU);
        else if (!crycb_addr)
                return set_validity_icpt(scb_s, 0x0039U);

[both: left 326-331 / right 175-180]

        /* copy only the wrapping keys */
        if (read_guest_real(vcpu, crycb_addr + 72,
                            vsie_page->crycb.dea_wrapping_key_mask, 56))
                return set_validity_icpt(scb_s, 0x0035U);

        scb_s->ecb3 |= ecb3_flags;

[right only: lines 181-182]

        scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
                        CRYCB_FORMAT2;

[both: left 332-338 / right 183-189]

        /* xor both blocks in one run */
        b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
        b2 = (unsigned long *)
             vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
        /* as 56%8 == 0, bitmap_xor won't overwrite any data */
        bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
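bitmap_xor() above combines the 56-byte wrapping-key mask just read from the guest with the mask in the VM's own crycb, 64 bits at a time; since 56 is a multiple of 8, exactly seven words are touched and nothing past the buffer is written. A plain-C userspace sketch of the same operation (memcpy is used only to stay alignment-clean in the sketch):

#include <stdint.h>
#include <string.h>

/* XOR two 56-byte wrapping-key masks in place, 64 bits at a time. */
static void xor_wrapping_masks(uint8_t dst[56], const uint8_t other[56])
{
        uint64_t a, b;
        size_t i;

        for (i = 0; i < 56; i += sizeof(uint64_t)) {
                memcpy(&a, dst + i, sizeof(a));
                memcpy(&b, other + i, sizeof(b));
                a ^= b;
                memcpy(dst + i, &a, sizeof(a));
        }
}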
[left only: lines 339-348]

end:
        switch (ret) {
        case -EINVAL:
                return set_validity_icpt(scb_s, 0x0020U);
        case -EFAULT:
                return set_validity_icpt(scb_s, 0x0035U);
        case -EACCES:
                return set_validity_icpt(scb_s, 0x003CU);
        }
        scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
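crycbd packs an origin and a format tag into one 32-bit word: the CRYCB origin is doubleword aligned, so its low bits are free to carry the format, and the origin is recovered by masking with 0x7ffffff8U as at the top of shadow_crycb(). A hedged sketch of the packing; the width of the format field and the mask names are assumptions for illustration, not copied from the kernel headers:

#include <stdint.h>

#define SKETCH_CRYCB_ADDR_MASK 0x7ffffff8U  /* origin, as masked in the source */
#define SKETCH_CRYCB_FMT_MASK  0x00000007U  /* low bits: format tag (assumed width) */

static uint32_t make_crycbd(uint32_t crycb_origin, uint32_t fmt)
{
        /* the origin is 8-byte aligned, so it cannot collide with the tag */
        return (crycb_origin & SKETCH_CRYCB_ADDR_MASK) |
               (fmt & SKETCH_CRYCB_FMT_MASK);
}

static uint32_t crycbd_origin(uint32_t crycbd)
{
        return crycbd & SKETCH_CRYCB_ADDR_MASK;
}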
[both: left 349-356 / right 190-197]

        return 0;
}

/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
        struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

--- 180 unchanged lines hidden ---

[both: left 537-544 / right 378-385]

        /* Epoch Extension */
        if (test_kvm_facility(vcpu->kvm, 139))
                scb_s->ecd |= scb_o->ecd & ECD_MEF;

        /* etoken */
        if (test_kvm_facility(vcpu->kvm, 156))
                scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
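Each of the blocks above follows the same pattern: a control bit requested in the guest's control block is copied into the shadow control block only when the facility backing it is offered to this VM. In isolation the pattern is just a guarded masked OR; a tiny sketch with placeholder bit values (not the kernel's):

#include <stdbool.h>
#include <stdint.h>

/* Propagate a guest-requested bit into the shadow ECD word only if the
 * facility backing it is available to the guest. */
static void propagate_ecd_bit(uint32_t *ecd_shadow, uint32_t ecd_guest,
                              uint32_t bit, bool facility_available)
{
        if (facility_available)
                *ecd_shadow |= ecd_guest & bit;
}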
[right only: lines 386-387]

        scb_s->hpid = HPID_VSIE;

[both: left 545-552 / right 388-395]

        prepare_ibc(vcpu, vsie_page);
        rc = shadow_crycb(vcpu, vsie_page);
out:
        if (rc)
                unshadow_scb(vcpu, vsie_page);
        return rc;
}

--- 431 unchanged lines hidden ---

[both: left 984-991 / right 827-834]

 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        __releases(vcpu->kvm->srcu)
        __acquires(vcpu->kvm->srcu)
{
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
        struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
        int guest_bp_isolation;
[left only: line 992]

        int rc = 0;

[right only: line 835]

        int rc;
[both: left 993-1000 / right 836-843]

        handle_last_fault(vcpu, vsie_page);

        if (need_resched())
                schedule();
        if (test_cpu_flag(CIF_MCCK_PENDING))
                s390_handle_mcck();

--- 11 unchanged lines hidden ---

[both: left 1012-1019 / right 855-862]

        if (test_kvm_facility(vcpu->kvm, 82) &&
            vcpu->arch.sie_block->fpf & FPF_BPBC)
                set_thread_flag(TIF_ISOLATE_BP_GUEST);

        local_irq_disable();
        guest_enter_irqoff();
        local_irq_enable();
[left only: lines 1020-1031]

        /*
         * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
         * and VCPU requests also hinder the vSIE from running and lead
         * to an immediate exit. kvm_s390_vsie_kick() has to be used to
         * also kick the vSIE.
         */
        vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
        barrier();
        if (!kvm_s390_vcpu_sie_inhibited(vcpu))
                rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
        barrier();
        vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

[right only: line 863]

        rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
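The left-hand bracket above is an announce/re-check pattern: publish "I am about to enter the vSIE", then check whether entry has been inhibited, run, and retract the announcement; a concurrent party that first sets the inhibit condition and then sees the announcement knows it must kick the running vSIE (kvm_s390_vsie_kick() in the source). Below is a userspace sketch of that pattern only; the flags, the kick mechanism and all names are invented for illustration, and seq_cst atomics stand in for the kernel's barrier() and architecture ordering:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool in_vsie;       /* stands in for PROG_IN_SIE in prog0c */
static atomic_bool sie_inhibited; /* stands in for the "SIE inhibited" condition */

static void run_nested_guest(void)        /* stands in for sie64a() */
{
        puts("nested guest ran");
}

static void vsie_run_bracketed(void)
{
        atomic_store(&in_vsie, true);         /* announce the upcoming entry */
        if (!atomic_load(&sie_inhibited))     /* re-check after announcing */
                run_nested_guest();
        atomic_store(&in_vsie, false);        /* retract the announcement */
}

/* A kicker first raises the inhibit flag, then checks the announcement:
 * either the runner sees the inhibit and never enters, or the kicker sees
 * the announcement and knows it must actively kick the running vSIE. */
static void kick(void)
{
        atomic_store(&sie_inhibited, true);
        if (atomic_load(&in_vsie))
                puts("target already inside - send a kick");
}

int main(void)
{
        kick();                /* inhibit before the run ... */
        vsie_run_bracketed();  /* ... so the guarded entry is skipped */
        return 0;
}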
[both: left 1032-1039 / right 864-871]

        local_irq_disable();
        guest_exit_irqoff();
        local_irq_enable();

        /* restore guest state for bp isolation override */
        if (!guest_bp_isolation)
                clear_thread_flag(TIF_ISOLATE_BP_GUEST);

--- 130 unchanged lines hidden ---

[both: left 1170-1177 / right 1002-1009]

                rc = do_vsie_run(vcpu, vsie_page);
                gmap_enable(vcpu->arch.gmap);
        }
        atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

        if (rc == -EAGAIN)
                rc = 0;
        if (rc || scb_s->icptcode || signal_pending(current) ||
[left only: lines 1178-1179]

            kvm_s390_vcpu_has_irq(vcpu, 0) ||
            kvm_s390_vcpu_sie_inhibited(vcpu))

[right only: line 1010]

            kvm_s390_vcpu_has_irq(vcpu, 0))
[both: left 1180-1187 / right 1011-1018]

                break;
        }

        if (rc == -EFAULT) {
                /*
                 * Addressing exceptions are always presented as intercepts.
                 * As addressing exceptions are suppressing and our guest 3 PSW
                 * points at the responsible instruction, we have to

--- 100 unchanged lines hidden ---

[both: left 1288-1295 / right 1119-1126]

        BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
        scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

        /* 512 byte alignment */
        if (unlikely(scb_addr & 0x1ffUL))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
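The 0x1ff test above is the usual power-of-two alignment check: an address is 512-byte aligned exactly when its low nine bits are zero. A minimal sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if addr sits on a 512-byte boundary (low 9 bits clear). */
static bool aligned_512(uint64_t addr)
{
        return (addr & 0x1ffULL) == 0;
}

int main(void)
{
        printf("%d %d\n", aligned_512(0x4200), aligned_512(0x42f0)); /* 1 0 */
        return 0;
}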
[left only: lines 1296-1297]

        if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
            kvm_s390_vcpu_sie_inhibited(vcpu))

[right only: line 1127]

        if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
[both: left 1298-1305 / right 1128-1135]

                return 0;

        vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
        if (IS_ERR(vsie_page))
                return PTR_ERR(vsie_page);
        else if (!vsie_page)
                /* double use of sie control block - simply do nothing */
                return 0;

--- 66 unchanged lines hidden ---