Lines matching refs:arch (arch/s390/kvm/vsie.c, KVM's vSIE / nested SIE support)

307 int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK; in shadow_crycb()
312 apie_h = vcpu->arch.sie_block->eca & ECA_APIE; in shadow_crycb()
327 vcpu->kvm->arch.crypto.crycb, in shadow_crycb()
335 ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 & in shadow_crycb()
337 ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC; in shadow_crycb()
352 vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask; in shadow_crycb()
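
The shadow_crycb() hits above show the general pattern: the shadow (guest-3) control block only gets the crypto controls that both the guest-2 hypervisor requested (scb_o) and the host-level SIE block already allows, obtained by ANDing the two bit sets, while the wrapping-key masks are copied from vcpu->kvm->arch.crypto.crycb. A minimal user-space sketch of that intersection, with made-up bit values standing in for the kernel's ECB3_*/ECD_* definitions:

#include <stdint.h>
#include <stdio.h>

/* made-up bit values for the sketch, not the kernel's definitions */
#define ECB3_AES  0x04
#define ECB3_DEA  0x08
#define ECD_ECC   0x10

int main(void)
{
	uint8_t  host_ecb3  = ECB3_AES | ECB3_DEA; /* what the host SIE block allows */
	uint8_t  guest_ecb3 = ECB3_AES;            /* what guest-2 asked for (scb_o) */
	uint32_t host_ecd   = ECD_ECC;
	uint32_t guest_ecd  = ECD_ECC;

	/* only bits present on both sides end up in the shadow control block */
	uint8_t  shadow_ecb3 = guest_ecb3 & host_ecb3 & (ECB3_AES | ECB3_DEA);
	uint32_t shadow_ecd  = guest_ecd  & host_ecd  & ECD_ECC;

	printf("shadow ecb3=0x%02x ecd=0x%08x\n",
	       (unsigned)shadow_ecb3, (unsigned)shadow_ecd);
	return 0;
}
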
380 if (vcpu->kvm->arch.model.ibc && new_ibc) { in prepare_ibc()
386 if (scb_s->ibc > vcpu->kvm->arch.model.ibc) in prepare_ibc()
387 scb_s->ibc = vcpu->kvm->arch.model.ibc; in prepare_ibc()
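
prepare_ibc() (lines 380-387 above) clamps the instruction-blocking-control value requested for the nested guest so it never exceeds the IBC configured for the guest model. A plain-C sketch of just that clamp, using stand-in names for the fields involved:

#include <stdint.h>
#include <stdio.h>

static uint16_t shadow_ibc(uint16_t requested, uint16_t model_max)
{
	uint16_t ibc = 0;

	/* only shadow an IBC if both the model and the request define one */
	if (model_max && requested) {
		ibc = requested;
		if (ibc > model_max)	/* never exceed the guest model's IBC */
			ibc = model_max;
	}
	return ibc;
}

int main(void)
{
	printf("0x%x\n", (unsigned)shadow_ibc(0x0f42, 0x0f31)); /* clamped to 0x0f31 */
	printf("0x%x\n", (unsigned)shadow_ibc(0x0f21, 0x0f31)); /* kept at 0x0f21    */
	return 0;
}
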
594 for (i = 0; i < kvm->arch.vsie.page_count; i++) { in kvm_s390_vsie_gmap_notifier()
595 page = READ_ONCE(kvm->arch.vsie.pages[i]); in kvm_s390_vsie_gmap_notifier()
1028 cr0.val = vcpu->arch.sie_block->gcr[0]; in vsie_handle_mvpg()
1122 vcpu->arch.sie_block->fpf & FPF_BPBC) in do_vsie_run()
1135 vcpu->arch.sie_block->prog0c |= PROG_IN_SIE; in do_vsie_run()
1142 vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE; in do_vsie_run()
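
The do_vsie_run() hits (1135/1142) bracket the entry into the nested guest with a PROG_IN_SIE marker in prog0c, so that code on another CPU, such as kvm_s390_vsie_kick() at the bottom of this listing, can tell whether the vCPU is actually inside SIE and needs to be forced out. A user-space model of that idea, with a C11 atomic flag in place of the prog0c bit; the "concurrent" kick is called inline here only to show both branches:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_sie;	/* stand-in for the PROG_IN_SIE bit in prog0c */

static void kick_model(void)
{
	/* a kick from another CPU only forces an exit if we are inside SIE */
	if (atomic_load(&in_sie))
		printf("request stop interrupt\n");
	else
		printf("nothing to do, vCPU not in SIE\n");
}

static void do_vsie_run_model(void)
{
	atomic_store(&in_sie, 1);	/* PROG_IN_SIE set before entry  */
	kick_model();			/* a concurrent kick would land  */
	atomic_store(&in_sie, 0);	/* PROG_IN_SIE cleared on exit   */
}

int main(void)
{
	kick_model();		/* not running the nested guest yet */
	do_vsie_run_model();
	kick_model();		/* already left SIE again           */
	return 0;
}
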
1203 asce = vcpu->arch.sie_block->gcr[1]; in acquire_gmap_shadow()
1204 cr0.val = vcpu->arch.sie_block->gcr[0]; in acquire_gmap_shadow()
1220 gmap = gmap_shadow(vcpu->arch.gmap, asce, edat); in acquire_gmap_shadow()
1236 WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s); in register_shadow_scb()
1247 scb_s->epoch += vcpu->kvm->arch.epoch; in register_shadow_scb()
1250 scb_s->epdx += vcpu->kvm->arch.epdx; in register_shadow_scb()
1251 if (scb_s->epoch < vcpu->kvm->arch.epoch) in register_shadow_scb()
1264 WRITE_ONCE(vcpu->arch.vsie_block, NULL); in unregister_shadow_scb()
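
register_shadow_scb() (1247-1251) adds the host KVM epoch to the epoch offset programmed by the guest-2 hypervisor and, when the 64-bit addition wraps around, carries into the epoch index (epdx). A small stand-alone model of that carry, with stand-in types for the sie_block fields:

#include <stdint.h>
#include <stdio.h>

struct tod_epoch {
	uint64_t epoch;	/* low 64 bits of the TOD epoch offset */
	uint8_t  epdx;	/* epoch index (extension) word        */
};

static void add_epoch(struct tod_epoch *shadow, const struct tod_epoch *kvm)
{
	shadow->epoch += kvm->epoch;
	shadow->epdx  += kvm->epdx;
	if (shadow->epoch < kvm->epoch)	/* unsigned wrap-around => carry */
		shadow->epdx += 1;
}

int main(void)
{
	struct tod_epoch shadow = { .epoch = UINT64_MAX - 5, .epdx = 0 };
	struct tod_epoch kvm    = { .epoch = 10,             .epdx = 0 };

	add_epoch(&shadow, &kvm);	/* wraps: epoch=4, epdx carries to 1 */
	printf("epoch=%llu epdx=%u\n",
	       (unsigned long long)shadow.epoch, (unsigned)shadow.epdx);
	return 0;
}
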
1288 gmap_enable(vcpu->arch.gmap); in vsie_run()
1335 page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9); in get_vsie_page()
1349 mutex_lock(&kvm->arch.vsie.mutex); in get_vsie_page()
1350 if (kvm->arch.vsie.page_count < nr_vcpus) { in get_vsie_page()
1353 mutex_unlock(&kvm->arch.vsie.mutex); in get_vsie_page()
1357 kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page; in get_vsie_page()
1358 kvm->arch.vsie.page_count++; in get_vsie_page()
1362 page = kvm->arch.vsie.pages[kvm->arch.vsie.next]; in get_vsie_page()
1366 kvm->arch.vsie.next++; in get_vsie_page()
1367 kvm->arch.vsie.next %= nr_vcpus; in get_vsie_page()
1369 radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9); in get_vsie_page()
1373 if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) { in get_vsie_page()
1375 mutex_unlock(&kvm->arch.vsie.mutex); in get_vsie_page()
1378 mutex_unlock(&kvm->arch.vsie.mutex); in get_vsie_page()
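
get_vsie_page() (1335-1378) keeps at most one shadow-SCB page per vCPU: lookups go through a radix tree keyed by addr >> 9, new pages are allocated until page_count reaches nr_vcpus, and after that pages are recycled round-robin via the next cursor, dropping the stale radix-tree entry before re-inserting under the new address. A simplified user-space model of that cache, using a linear scan as a stand-in for the radix tree:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_VCPUS 2	/* at most one cached shadow page per vCPU */

struct shadow_page { uint64_t index; };	/* index ~ origin address of scb_o */

static struct shadow_page *pages[NR_VCPUS];
static unsigned int page_count, next_reuse;

/* linear scan standing in for the kernel's radix tree keyed by addr >> 9 */
static struct shadow_page *map_lookup(uint64_t addr)
{
	for (unsigned int i = 0; i < page_count; i++)
		if (pages[i]->index == addr)
			return pages[i];
	return NULL;
}

static struct shadow_page *get_shadow_page(uint64_t addr)
{
	struct shadow_page *page = map_lookup(addr);

	if (page)			/* already shadowed: hand it back      */
		return page;

	if (page_count < NR_VCPUS) {	/* still room: allocate a fresh page   */
		page = calloc(1, sizeof(*page));
		if (!page)
			return NULL;
		pages[page_count++] = page;
	} else {			/* full: recycle pages round-robin     */
		page = pages[next_reuse];
		next_reuse = (next_reuse + 1) % NR_VCPUS;
		/* the kernel drops the old radix-tree entry at this point */
	}
	page->index = addr;		/* re-key the page to the new address  */
	return page;
}

int main(void)
{
	printf("%p\n", (void *)get_shadow_page(0x1000));
	printf("%p\n", (void *)get_shadow_page(0x2000));
	printf("%p\n", (void *)get_shadow_page(0x3000)); /* recycles a page */
	printf("%p\n", (void *)get_shadow_page(0x2000)); /* still cached    */
	return 0;
}

kvm_s390_vsie_destroy() (1463-1474 below) is the inverse walk under the same mutex: every cached page is unhooked from the radix tree, the array slot is cleared, and page_count is reset to zero.
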
1405 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_vsie()
1452 mutex_init(&kvm->arch.vsie.mutex); in kvm_s390_vsie_init()
1453 INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT); in kvm_s390_vsie_init()
1463 mutex_lock(&kvm->arch.vsie.mutex); in kvm_s390_vsie_destroy()
1464 for (i = 0; i < kvm->arch.vsie.page_count; i++) { in kvm_s390_vsie_destroy()
1465 page = kvm->arch.vsie.pages[i]; in kvm_s390_vsie_destroy()
1466 kvm->arch.vsie.pages[i] = NULL; in kvm_s390_vsie_destroy()
1470 radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9); in kvm_s390_vsie_destroy()
1473 kvm->arch.vsie.page_count = 0; in kvm_s390_vsie_destroy()
1474 mutex_unlock(&kvm->arch.vsie.mutex); in kvm_s390_vsie_destroy()
1479 struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block); in kvm_s390_vsie_kick()