Lines Matching refs:arch

310 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
312 kvm->arch.epoch = vcpu->arch.sie_block->epoch;
313 kvm->arch.epdx = vcpu->arch.sie_block->epdx;
315 if (vcpu->arch.cputm_enabled)
316 vcpu->arch.cputm_start += *delta;
317 if (vcpu->arch.vsie_block)
318 kvm_clock_sync_scb(vcpu->arch.vsie_block,
676 struct gmap *gmap = kvm->arch.gmap;
759 kvm->arch.use_irqchip = 1;
764 kvm->arch.user_sigp = 1;
772 set_kvm_facility(kvm->arch.model.fac_mask, 129);
773 set_kvm_facility(kvm->arch.model.fac_list, 129);
775 set_kvm_facility(kvm->arch.model.fac_mask, 134);
776 set_kvm_facility(kvm->arch.model.fac_list, 134);
779 set_kvm_facility(kvm->arch.model.fac_mask, 135);
780 set_kvm_facility(kvm->arch.model.fac_list, 135);
783 set_kvm_facility(kvm->arch.model.fac_mask, 148);
784 set_kvm_facility(kvm->arch.model.fac_list, 148);
787 set_kvm_facility(kvm->arch.model.fac_mask, 152);
788 set_kvm_facility(kvm->arch.model.fac_list, 152);
791 set_kvm_facility(kvm->arch.model.fac_mask, 192);
792 set_kvm_facility(kvm->arch.model.fac_list, 192);
807 set_kvm_facility(kvm->arch.model.fac_mask, 64);
808 set_kvm_facility(kvm->arch.model.fac_list, 64);
820 set_kvm_facility(kvm->arch.model.fac_mask, 72);
821 set_kvm_facility(kvm->arch.model.fac_list, 72);
834 set_kvm_facility(kvm->arch.model.fac_mask, 133);
835 set_kvm_facility(kvm->arch.model.fac_list, 133);
846 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
858 kvm->arch.use_skf = 0;
859 kvm->arch.use_pfmfi = 0;
867 kvm->arch.user_stsi = 1;
872 kvm->arch.user_instr0 = 1;
882 set_kvm_facility(kvm->arch.model.fac_mask, 11);
883 set_kvm_facility(kvm->arch.model.fac_list, 11);
905 kvm->arch.mem_limit);
906 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
933 kvm->arch.use_cmma = 1;
935 kvm->arch.use_pfmfi = 0;
945 if (!kvm->arch.use_cmma)
951 s390_reset_cmma(kvm->arch.gmap->mm);
965 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
966 new_limit > kvm->arch.mem_limit)
985 gmap_remove(kvm->arch.gmap);
987 kvm->arch.gmap = new;
994 (void *) kvm->arch.gmap->asce);
1032 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1033 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1034 kvm->arch.crypto.aes_kw = 1;
1043 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1044 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1045 kvm->arch.crypto.dea_kw = 1;
1053 kvm->arch.crypto.aes_kw = 0;
1054 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1055 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1063 kvm->arch.crypto.dea_kw = 0;
1064 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1065 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1073 kvm->arch.crypto.apie = 1;
1080 kvm->arch.crypto.apie = 0;
1095 if (!vcpu->kvm->arch.use_zpci_interp)
1098 vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
1099 vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
1116 kvm->arch.use_zpci_interp = 1;
1149 if (kvm->arch.migration_mode)
1155 if (!kvm->arch.use_cmma) {
1156 kvm->arch.migration_mode = 1;
1172 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1173 kvm->arch.migration_mode = 1;
1185 if (!kvm->arch.migration_mode)
1187 kvm->arch.migration_mode = 0;
1188 if (kvm->arch.use_cmma)
1217 u64 mig = kvm->arch.migration_mode;
1320 gtod->tod = clk.tod + kvm->arch.epoch;
1323 gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1411 kvm->arch.model.cpuid = proc->cpuid;
1416 kvm->arch.model.ibc = unblocked_ibc;
1418 kvm->arch.model.ibc = lowest_ibc;
1420 kvm->arch.model.ibc = proc->ibc;
1422 memcpy(kvm->arch.model.fac_list, proc->fac_list,
1425 kvm->arch.model.ibc,
1426 kvm->arch.model.cpuid);
1428 kvm->arch.model.fac_list[0],
1429 kvm->arch.model.fac_list[1],
1430 kvm->arch.model.fac_list[2]);
1456 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1474 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1482 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1483 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1484 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1485 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1487 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1488 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1490 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1491 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1493 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1494 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1496 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1497 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1499 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1500 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1502 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1503 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1505 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1506 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1508 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1509 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1511 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1512 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1514 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1515 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1517 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1518 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1520 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1521 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1526 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1527 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1529 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1530 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1531 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1532 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1534 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1535 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1536 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1537 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1567 kvm->arch.model.uv_feat_guest.feat = data;
1606 proc->cpuid = kvm->arch.model.cpuid;
1607 proc->ibc = kvm->arch.model.ibc;
1608 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1611 kvm->arch.model.ibc,
1612 kvm->arch.model.cpuid);
1614 kvm->arch.model.fac_list[0],
1615 kvm->arch.model.fac_list[1],
1616 kvm->arch.model.fac_list[2]);
1636 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1641 kvm->arch.model.ibc,
1642 kvm->arch.model.cpuid);
1663 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1691 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1696 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1697 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1698 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1699 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1701 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1702 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1704 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1705 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1707 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1708 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1710 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1711 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1713 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1714 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1716 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1717 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1719 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1720 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1722 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1723 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1725 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1726 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1728 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1729 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1731 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1732 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1734 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1735 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1737 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1738 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1741 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1743 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1744 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1745 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1746 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1748 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1749 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1750 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1751 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1827 unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1900 read_lock(&kvm->arch.sca_lock);
1901 sca = kvm->arch.sca;
1907 read_unlock(&kvm->arch.sca_lock);
1928 read_lock(&kvm->arch.sca_lock);
1929 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1930 read_unlock(&kvm->arch.sca_lock);
2275 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2316 if (!kvm->arch.use_cmma)
2323 if (!peek && !kvm->arch.migration_mode)
2332 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2350 if (kvm->arch.migration_mode)
2351 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2376 if (!kvm->arch.use_cmma)
2558 if (kvm->arch.pv.dumping)
2572 kvm->arch.pv.dumping = true;
2580 if (!kvm->arch.pv.dumping)
2593 if (!kvm->arch.pv.dumping)
2649 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2668 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2693 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
3004 if (kvm->arch.use_irqchip) {
3156 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
3159 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3166 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3168 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3189 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3193 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3240 memset(&kvm->arch.crypto.crycb->apcb0, 0,
3241 sizeof(kvm->arch.crypto.crycb->apcb0));
3242 memset(&kvm->arch.crypto.crycb->apcb1, 0,
3243 sizeof(kvm->arch.crypto.crycb->apcb1));
3263 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3265 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3271 kvm->arch.crypto.aes_kw = 1;
3272 kvm->arch.crypto.dea_kw = 1;
3273 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3274 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3275 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3276 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3281 if (kvm->arch.use_esca)
3282 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3284 free_page((unsigned long)(kvm->arch.sca));
3285 kvm->arch.sca = NULL;
3322 rwlock_init(&kvm->arch.sca_lock);
3324 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3325 if (!kvm->arch.sca)
3331 kvm->arch.sca = (struct bsca_block *)
3332 ((char *) kvm->arch.sca + sca_offset);
3337 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3338 if (!kvm->arch.dbf)
3342 kvm->arch.sie_page2 =
3344 if (!kvm->arch.sie_page2)
3347 kvm->arch.sie_page2->kvm = kvm;
3348 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3351 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3354 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3357 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3360 set_kvm_facility(kvm->arch.model.fac_mask, 138);
3361 set_kvm_facility(kvm->arch.model.fac_list, 138);
3363 set_kvm_facility(kvm->arch.model.fac_mask, 74);
3364 set_kvm_facility(kvm->arch.model.fac_list, 74);
3366 set_kvm_facility(kvm->arch.model.fac_mask, 147);
3367 set_kvm_facility(kvm->arch.model.fac_list, 147);
3371 set_kvm_facility(kvm->arch.model.fac_mask, 65);
3373 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3374 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3376 kvm->arch.model.uv_feat_guest.feat = 0;
3387 mutex_init(&kvm->arch.float_int.ais_lock);
3388 spin_lock_init(&kvm->arch.float_int.lock);
3390 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3391 init_waitqueue_head(&kvm->arch.ipte_wq);
3392 mutex_init(&kvm->arch.ipte_mutex);
3394 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3398 kvm->arch.gmap = NULL;
3399 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3402 kvm->arch.mem_limit = TASK_SIZE_MAX;
3404 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3406 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3407 if (!kvm->arch.gmap)
3409 kvm->arch.gmap->private = kvm;
3410 kvm->arch.gmap->pfault_enabled = 0;
3413 kvm->arch.use_pfmfi = sclp.has_pfmfi;
3414 kvm->arch.use_skf = sclp.has_skey;
3415 spin_lock_init(&kvm->arch.start_stop_lock);
3419 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3420 kvm->arch.pv.set_aside = NULL;
3425 free_page((unsigned long)kvm->arch.sie_page2);
3426 debug_unregister(kvm->arch.dbf);
3445 gmap_remove(vcpu->arch.gmap);
3447 if (vcpu->kvm->arch.use_cmma)
3452 free_page((unsigned long)(vcpu->arch.sie_block));
3474 if (kvm->arch.pv.mmu_notifier.ops)
3475 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3477 debug_unregister(kvm->arch.dbf);
3478 free_page((unsigned long)kvm->arch.sie_page2);
3480 gmap_remove(kvm->arch.gmap);
3490 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3491 if (!vcpu->arch.gmap)
3493 vcpu->arch.gmap->private = vcpu->kvm;
3502 read_lock(&vcpu->kvm->arch.sca_lock);
3503 if (vcpu->kvm->arch.use_esca) {
3504 struct esca_block *sca = vcpu->kvm->arch.sca;
3509 struct bsca_block *sca = vcpu->kvm->arch.sca;
3514 read_unlock(&vcpu->kvm->arch.sca_lock);
3520 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3523 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3524 vcpu->arch.sie_block->scaol = sca_phys;
3527 read_lock(&vcpu->kvm->arch.sca_lock);
3528 if (vcpu->kvm->arch.use_esca) {
3529 struct esca_block *sca = vcpu->kvm->arch.sca;
3532 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3533 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3534 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3535 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3538 struct bsca_block *sca = vcpu->kvm->arch.sca;
3541 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3542 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3543 vcpu->arch.sie_block->scaol = sca_phys;
3546 read_unlock(&vcpu->kvm->arch.sca_lock);
3569 struct bsca_block *old_sca = kvm->arch.sca;
3576 if (kvm->arch.use_esca)
3588 write_lock(&kvm->arch.sca_lock);
3593 vcpu->arch.sie_block->scaoh = scaoh;
3594 vcpu->arch.sie_block->scaol = scaol;
3595 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3597 kvm->arch.sca = new_sca;
3598 kvm->arch.use_esca = 1;
3600 write_unlock(&kvm->arch.sca_lock);
3606 old_sca, kvm->arch.sca);
3624 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3632 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3633 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3634 vcpu->arch.cputm_start = get_tod_clock_fast();
3635 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3641 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3642 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3643 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3644 vcpu->arch.cputm_start = 0;
3645 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3651 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3652 vcpu->arch.cputm_enabled = true;
3659 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3661 vcpu->arch.cputm_enabled = false;
3682 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3683 if (vcpu->arch.cputm_enabled)
3684 vcpu->arch.cputm_start = get_tod_clock_fast();
3685 vcpu->arch.sie_block->cputm = cputm;
3686 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3696 if (unlikely(!vcpu->arch.cputm_enabled))
3697 return vcpu->arch.sie_block->cputm;
3701 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3707 value = vcpu->arch.sie_block->cputm;
3709 if (likely(vcpu->arch.cputm_start))
3710 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3711 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3719 gmap_enable(vcpu->arch.enabled_gmap);
3721 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3729 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3732 vcpu->arch.enabled_gmap = gmap_get_enabled();
3733 gmap_disable(vcpu->arch.enabled_gmap);
3741 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3742 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3746 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3749 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3750 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3752 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3757 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3780 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3783 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3784 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3785 vcpu->arch.sie_block->eca &= ~ECA_APIE;
3786 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3788 if (vcpu->kvm->arch.crypto.apie)
3789 vcpu->arch.sie_block->eca |= ECA_APIE;
3792 if (vcpu->kvm->arch.crypto.aes_kw) {
3793 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3796 vcpu->arch.sie_block->ecd |= ECD_ECC;
3799 if (vcpu->kvm->arch.crypto.dea_kw)
3800 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3805 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3806 vcpu->arch.sie_block->cbrlo = 0;
3816 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3822 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3824 vcpu->arch.sie_block->ibc = model->ibc;
3826 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3834 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3847 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3849 vcpu->arch.sie_block->ecb |= ECB_SRSI;
3851 vcpu->arch.sie_block->ecb |= ECB_PTF;
3853 vcpu->arch.sie_block->ecb |= ECB_TE;
3855 vcpu->arch.sie_block->ecb |= ECB_SPECI;
3857 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3858 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3860 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3861 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3863 vcpu->arch.sie_block->eca |= ECA_CEI;
3865 vcpu->arch.sie_block->eca |= ECA_IB;
3867 vcpu->arch.sie_block->eca |= ECA_SII;
3869 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3871 vcpu->arch.sie_block->eca |= ECA_VX;
3872 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3875 vcpu->arch.sie_block->ecd |= ECD_MEF;
3877 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3878 if (vcpu->arch.sie_block->gd) {
3879 vcpu->arch.sie_block->eca |= ECA_AIV;
3881 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3883 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3884 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3889 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3891 if (vcpu->kvm->arch.use_cmma) {
3896 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3897 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3899 vcpu->arch.sie_block->hpid = HPID_KVM;
3933 vcpu->arch.sie_block = &sie_page->sie_block;
3934 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3937 vcpu->arch.sie_block->mso = 0;
3938 vcpu->arch.sie_block->msl = sclp.hamax;
3940 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3941 spin_lock_init(&vcpu->arch.local_int.lock);
3942 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3943 seqcount_init(&vcpu->arch.cputm_seqcount);
3945 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3978 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3979 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3990 gmap_remove(vcpu->arch.gmap);
3992 free_page((unsigned long)(vcpu->arch.sie_block));
3998 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4004 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
4009 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4015 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4020 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4026 return atomic_read(&vcpu->arch.sie_block->prog20) &
4032 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4043 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
4103 r = put_user(vcpu->arch.sie_block->todpr,
4107 r = put_user(vcpu->arch.sie_block->epoch,
4115 r = put_user(vcpu->arch.sie_block->ckc,
4119 r = put_user(vcpu->arch.pfault_token,
4123 r = put_user(vcpu->arch.pfault_compare,
4127 r = put_user(vcpu->arch.pfault_select,
4131 r = put_user(vcpu->arch.sie_block->pp,
4135 r = put_user(vcpu->arch.sie_block->gbea,
4153 r = get_user(vcpu->arch.sie_block->todpr,
4157 r = get_user(vcpu->arch.sie_block->epoch,
4166 r = get_user(vcpu->arch.sie_block->ckc,
4170 r = get_user(vcpu->arch.pfault_token,
4172 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4176 r = get_user(vcpu->arch.pfault_compare,
4180 r = get_user(vcpu->arch.pfault_select,
4184 r = get_user(vcpu->arch.sie_block->pp,
4188 r = get_user(vcpu->arch.sie_block->gbea,
4200 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4201 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4219 vcpu->arch.sie_block->gpsw.mask = 0;
4220 vcpu->arch.sie_block->gpsw.addr = 0;
4223 vcpu->arch.sie_block->ckc = 0;
4224 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4225 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4226 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4247 vcpu->arch.sie_block->gbea = 1;
4248 vcpu->arch.sie_block->pp = 0;
4249 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4250 vcpu->arch.sie_block->todpr = 0;
4292 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4304 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4395 vcpu->arch.guestdbg.last_bp = 0;
4478 rc = gmap_mprotect_notify(vcpu->arch.gmap,
4489 vcpu->arch.sie_block->ihcpu = 0xffff;
4510 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4520 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4529 if ((vcpu->kvm->arch.use_cmma) &&
4531 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4551 kvm->arch.epoch = gtod->tod - clk.tod;
4552 kvm->arch.epdx = 0;
4554 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4555 if (kvm->arch.epoch > gtod->tod)
4556 kvm->arch.epdx -= 1;
4561 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4562 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
4590 return gmap_fault(vcpu->arch.gmap, gpa,
4614 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4615 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4623 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4624 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4645 struct kvm_arch_async_pf arch;
4647 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4649 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4650 vcpu->arch.pfault_compare)
4656 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4658 if (!vcpu->arch.gmap->pfault_enabled)
4663 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4666 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
4680 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4681 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4701 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4703 vcpu->arch.sie_block->icptcode = 0;
4704 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4730 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4739 pgm_info = vcpu->arch.pgm;
4753 vcpu->arch.sie_block->icptcode);
4754 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4759 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4760 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4764 sie_page = container_of(vcpu->arch.sie_block,
4771 if (vcpu->arch.sie_block->icptcode > 0) {
4777 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4778 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4779 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4805 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4834 exit_reason = sie64a(vcpu->arch.sie_block,
4846 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4847 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4848 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4872 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4873 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4875 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4876 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4877 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4880 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4881 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4882 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4883 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4887 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4888 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4889 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4898 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4900 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4909 !vcpu->arch.gs_enabled) {
4911 vcpu->arch.sie_block->ecb |= ECB_GS;
4912 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4913 vcpu->arch.gs_enabled = 1;
4917 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4918 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4924 vcpu->arch.host_gscb = current->thread.gs_cb;
4925 save_gs_cb(vcpu->arch.host_gscb);
4927 if (vcpu->arch.gs_enabled) {
4944 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4950 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4952 save_access_regs(vcpu->arch.host_acrs);
4956 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4957 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4980 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4981 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4992 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4993 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4994 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4995 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4996 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
5000 if (vcpu->arch.gs_enabled)
5002 current->thread.gs_cb = vcpu->arch.host_gscb;
5003 restore_gs_cb(vcpu->arch.host_gscb);
5004 if (!vcpu->arch.host_gscb)
5006 vcpu->arch.host_gscb = NULL;
5016 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
5017 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
5019 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
5021 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
5022 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
5023 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
5024 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
5026 restore_access_regs(vcpu->arch.host_acrs);
5031 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
5032 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
5048 if (vcpu->kvm->arch.pv.dumping)
5151 &vcpu->arch.sie_block->gpsw, 16);
5157 &vcpu->arch.sie_block->todpr, 4);
5161 clkcomp = vcpu->arch.sie_block->ckc >> 8;
5167 &vcpu->arch.sie_block->gcr, 128);
5218 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5225 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5254 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5260 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5274 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5281 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5314 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5328 if (!vcpu->kvm->arch.css_support) {
5329 vcpu->kvm->arch.css_support = 1;
5353 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5358 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5423 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5508 if (!vcpu->kvm->arch.pv.dumping)
5628 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
5645 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
5651 r = gmap_fault(vcpu->arch.gmap, arg, 0);
5746 vmf->page = virt_to_page(vcpu->arch.sie_block);
5786 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5790 if (!kvm->arch.migration_mode)
5818 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5822 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5828 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
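
A recurring pattern in the matches above (source lines 3632-3711) is the seqcount protocol around the guest CPU timer: writers bracket updates to cputm/cputm_start with raw_write_seqcount_begin/end, and the lockless reader loops on read_seqcount_retry until it observes an even, unchanged sequence. Below is a minimal user-space C sketch of that read/write protocol, simplified from the kernel version: struct seqcount, get_cpu_timer, and now() are illustrative stand-ins (not names from kvm-s390.c), C11 atomics replace the kernel's seqcount_t, and the preemption-disable step the kernel reader performs is omitted.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's seqcount_t. */
struct seqcount { atomic_uint seq; };

static void write_begin(struct seqcount *s)
{
	/* odd sequence value = writer in progress */
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static void write_end(struct seqcount *s)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static unsigned read_begin(const struct seqcount *s)
{
	return atomic_load_explicit(&s->seq, memory_order_acquire);
}

static int read_retry(const struct seqcount *s, unsigned start)
{
	/* retry if a writer was active (odd) or ran since (changed) */
	return (start & 1) ||
	       atomic_load_explicit(&s->seq, memory_order_acquire) != start;
}

/* Illustrative per-vCPU timer state, mirroring cputm/cputm_start. */
struct timer_state {
	struct seqcount sc;
	uint64_t cputm;		/* remaining timer value */
	uint64_t cputm_start;	/* 0 while accounting is stopped */
};

/* Stand-in for get_tod_clock_fast(). */
static uint64_t now(void) { return 12345; }

/* Lockless reader, shaped like the loop at source lines 3701-3711. */
static uint64_t get_cpu_timer(const struct timer_state *t)
{
	unsigned seq;
	uint64_t value;

	do {
		seq = read_begin(&t->sc);
		value = t->cputm;
		if (t->cputm_start)
			value -= now() - t->cputm_start;
	} while (read_retry(&t->sc, seq));
	return value;
}

int main(void)
{
	struct timer_state t = { .cputm = 1000, .cputm_start = 0 };
	printf("%llu\n", (unsigned long long)get_cpu_timer(&t));
	return 0;
}

The design choice the pattern encodes: reading the timer is a hot path that must never block, so readers simply recompute if a writer (timer start/stop or a value update) raced with them, and writers never wait on readers.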