kvm-s390.c: diff from commit 2f8d01a1475cfede058c6a92b5d3dad576da2827 ("-" lines) to commit 4725c86055f5bbdcdfe47199c0715881893a2c79 ("+" lines); space-prefixed lines are common context.
 /*
  * hosting zSeries kernel virtual machines
  *
  * Copyright IBM Corp. 2008, 2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
  * as published by the Free Software Foundation.

--- 329 unchanged lines hidden ---

 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
     /* Nothing todo */
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-    save_fp_regs(&vcpu->arch.host_fpregs);
+    save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+    save_fp_regs(vcpu->arch.host_fpregs.fprs);
     save_access_regs(vcpu->arch.host_acrs);
-    vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
-    restore_fp_regs(&vcpu->arch.guest_fpregs);
+    restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+    restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
     restore_access_regs(vcpu->run->s.regs.acrs);
     gmap_enable(vcpu->arch.gmap);
     atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
     atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
     gmap_disable(vcpu->arch.gmap);
-    save_fp_regs(&vcpu->arch.guest_fpregs);
+    save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+    save_fp_regs(vcpu->arch.guest_fpregs.fprs);
     save_access_regs(vcpu->run->s.regs.acrs);
-    restore_fp_regs(&vcpu->arch.host_fpregs);
+    restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+    restore_fp_regs(vcpu->arch.host_fpregs.fprs);
     restore_access_regs(vcpu->arch.host_acrs);
 }
 
 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 {
     /* this equals initial cpu reset in pop, but we don't switch to ESA */
     vcpu->arch.sie_block->gpsw.mask = 0UL;
     vcpu->arch.sie_block->gpsw.addr = 0UL;
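Note: this hunk splits the "-" side's combined save_fp_regs()/restore_fp_regs(),
which moved the floating-point-control word and all sixteen data registers
through one s390_fp_regs struct, into separate helpers on the "+" side:
save_fp_ctl()/restore_fp_ctl() for the control word and
save_fp_regs()/restore_fp_regs() for the fprs array. It also drops the silent
sanitizing of the guest control word (fpc &= FPC_VALID_MASK) at vcpu load
time; validation moves into the set_fpu ioctl further down. A minimal
userspace illustration of masking versus checking (not kernel code; the
FPC_VALID_MASK value is taken from the s390 headers and is an assumption
here):

/* fpc_mask_vs_check.c - illustration only, not kernel code */
#include <stdint.h>
#include <stdio.h>

/* assumed value of s390's FPC_VALID_MASK: IEEE mask bits, flag bits,
 * data-exception code, rounding mode */
#define FPC_VALID_MASK 0xf8f8ff03u

/* stand-in for the kernel's test_fp_ctl(): 0 when the word is loadable */
static int test_fp_ctl_model(uint32_t fpc)
{
    return (fpc & ~FPC_VALID_MASK) ? -1 : 0;
}

int main(void)
{
    uint32_t fpc = 0xdeadbeef;  /* contains bits no valid fpc may set */

    /* masking: corrupt bits silently disappear at vcpu load time */
    printf("masked fpc: %#x\n", (unsigned int)(fpc & FPC_VALID_MASK));

    /* checking: the caller sees the problem and can fail with -EINVAL */
    if (test_fp_ctl_model(fpc))
        puts("test_fp_ctl: invalid fpc, rejected");
    return 0;
}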

--- 243 unchanged lines hidden ---

 {
     memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
     memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
     return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+    if (test_fp_ctl(fpu->fpc))
+        return -EINVAL;
     memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-    vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
-    restore_fp_regs(&vcpu->arch.guest_fpregs);
+    vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+    restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+    restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
     return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
     memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
     fpu->fpc = vcpu->arch.guest_fpregs.fpc;
     return 0;
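With the test_fp_ctl() check on the "+" side, kvm_arch_vcpu_ioctl_set_fpu()
rejects an unloadable floating-point-control word with -EINVAL, where the
"-" side silently masks it with FPC_VALID_MASK. A hedged userspace sketch of
the visible difference (assumes an s390 host and an already-created vcpu fd;
on s390, struct kvm_fpu carries fpc plus fprs[16]):

/* set_fpu_check.c - userspace sketch, error handling trimmed */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int set_guest_fpu(int vcpu_fd)
{
    struct kvm_fpu fpu;

    memset(&fpu, 0, sizeof(fpu));   /* all fprs zero */
    fpu.fpc = 0xffffffffu;          /* sets bits no valid fpc may have */

    if (ioctl(vcpu_fd, KVM_SET_FPU, &fpu) < 0) {
        perror("KVM_SET_FPU");      /* EINVAL where test_fp_ctl() is in place */
        return -errno;
    }
    return 0;                       /* masking kernels accept it silently */
}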

--- 52 unchanged lines hidden ---

                          PAGE_SIZE * 2);
         if (rc)
             return rc;
         s390_vcpu_unblock(vcpu);
     }
     return 0;
 }
 
-static int vcpu_pre_run(struct kvm_vcpu *vcpu)
+static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
-    int rc, cpuflags;
+    int rc;
 
     memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
     if (need_resched())
         schedule();
 
     if (test_thread_flag(TIF_MCCK_PENDING))
         s390_handle_mcck();
 
     if (!kvm_is_ucontrol(vcpu->kvm))
         kvm_s390_deliver_pending_interrupts(vcpu);
 
     rc = kvm_s390_handle_requests(vcpu);
     if (rc)
         return rc;
 
     vcpu->arch.sie_block->icptcode = 0;
-    cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
-    VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
-    trace_kvm_s390_sie_enter(vcpu, cpuflags);
+    VCPU_EVENT(vcpu, 6, "entering sie flags %x",
+               atomic_read(&vcpu->arch.sie_block->cpuflags));
+    trace_kvm_s390_sie_enter(vcpu,
+                             atomic_read(&vcpu->arch.sie_block->cpuflags));
 
-    return 0;
-}
-
-static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
-{
-    int rc;
-
+    /*
+     * As PF_VCPU will be used in fault handler, between guest_enter
+     * and guest_exit should be no uaccess.
+     */
+    preempt_disable();
+    kvm_guest_enter();
+    preempt_enable();
+    rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+    kvm_guest_exit();
+
     VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                vcpu->arch.sie_block->icptcode);
     trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
 
-    if (exit_reason >= 0) {
+    if (rc > 0)
         rc = 0;
-    } else {
+    if (rc < 0) {
         if (kvm_is_ucontrol(vcpu->kvm)) {
             rc = SIE_INTERCEPT_UCONTROL;
         } else {
             VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
             trace_kvm_s390_sie_fault(vcpu);
             rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
         }
     }
 
     memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
-
-    if (rc == 0) {
-        if (kvm_is_ucontrol(vcpu->kvm))
-            rc = -EOPNOTSUPP;
-        else
-            rc = kvm_handle_sie_intercept(vcpu);
-    }
-
     return rc;
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu)
-{
-    int rc, exit_reason;
-
-    /*
-     * We try to hold kvm->srcu during most of vcpu_run (except when run-
-     * ning the guest), so that memslots (and other stuff) are protected
-     */
-    vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-    do {
-        rc = vcpu_pre_run(vcpu);
-        if (rc)
-            break;
-
-        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-        /*
-         * As PF_VCPU will be used in fault handler, between
-         * guest_enter and guest_exit should be no uaccess.
-         */
-        preempt_disable();
-        kvm_guest_enter();
-        preempt_enable();
-        exit_reason = sie64a(vcpu->arch.sie_block,
-                             vcpu->run->s.regs.gprs);
-        kvm_guest_exit();
-        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-        rc = vcpu_post_run(vcpu, exit_reason);
-    } while (!signal_pending(current) && !rc);
-
-    srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-    return rc;
-}
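Note on the "-" side's deleted run loop: __vcpu_run() there holds the
kvm->srcu read lock for everything except the sie64a() call itself. Memslot
lookups on the fault-handling path are SRCU-protected reads, and writers
replacing the memslot array wait in synchronize_srcu(), so the read lock must
be dropped around guest execution or a running guest could stall memslot
updates indefinitely. A generic kernel-style sketch of that reader/writer
pattern (demo_* names are stand-ins, not code from this file):

/* srcu_pattern.c - kernel-style sketch of the SRCU usage */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(demo_srcu);

static int __rcu *demo_slot;            /* stand-in for kvm->memslots */

/* reader: cheap and sleepable, but must not span a long-running guest */
static int demo_read(void)
{
    int idx, val = 0;
    int *p;

    idx = srcu_read_lock(&demo_srcu);
    p = srcu_dereference(demo_slot, &demo_srcu);
    if (p)
        val = *p;
    srcu_read_unlock(&demo_srcu, idx);
    return val;
}

/* writer: publish the new copy, then wait out readers of the old one */
static void demo_replace(int *newp)
{
    int *old = rcu_dereference_protected(demo_slot, true);

    rcu_assign_pointer(demo_slot, newp);
    synchronize_srcu(&demo_srcu);       /* all read-side sections finished */
    kfree(old);
}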
-
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
     int rc;
     sigset_t sigsaved;
 
+rerun_vcpu:
     if (vcpu->sigset_active)
         sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
     atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
     BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
 
     switch (kvm_run->exit_reason) {

--- 16 unchanged lines hidden ---

     }
     if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
         kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
         memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
         kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
     }
 
     might_fault();
-    rc = __vcpu_run(vcpu);
+
+    do {
+        rc = __vcpu_run(vcpu);
+        if (rc)
+            break;
+        if (kvm_is_ucontrol(vcpu->kvm))
+            rc = -EOPNOTSUPP;
+        else
+            rc = kvm_handle_sie_intercept(vcpu);
+    } while (!signal_pending(current) && !rc);
+
+    if (rc == SIE_INTERCEPT_RERUNVCPU)
+        goto rerun_vcpu;
 
     if (signal_pending(current) && !rc) {
         kvm_run->exit_reason = KVM_EXIT_INTR;
         rc = -EINTR;
     }
 
 #ifdef CONFIG_KVM_S390_UCONTROL
     if (rc == SIE_INTERCEPT_UCONTROL) {
         kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;

--- 64 unchanged lines hidden ---

     } else
         prefix = 0;
 
     /*
      * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
      * copying in vcpu load/put. Lets update our copies before we save
      * it into the save area
      */
-    save_fp_regs(&vcpu->arch.guest_fpregs);
+    save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+    save_fp_regs(vcpu->arch.guest_fpregs.fprs);
     save_access_regs(vcpu->run->s.regs.acrs);
 
     if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
             vcpu->arch.guest_fpregs.fprs, 128, prefix))
         return -EFAULT;
 
     if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
             vcpu->run->s.regs.gprs, 128, prefix))
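Both sides then copy the freshly saved registers into the architected
store-status area via __guestcopy(vcpu, addr + offsetof(struct save_area,
...), ..., 128, prefix), 128 bytes being sixteen 8-byte registers. A
standalone illustration of that offsetof() arithmetic (the two-field struct
is a stand-in; the kernel's struct save_area carries the full ESA/390
store-status layout):

/* save_area_offsets.c - standalone illustration of the copy shape */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct save_area_sketch {
    unsigned long long fp_regs[16];     /* 128 bytes: f0..f15 */
    unsigned long long gp_regs[16];     /* 128 bytes: r0..r15 */
};

int main(void)
{
    unsigned long long fprs[16] = { 0 };
    struct save_area_sketch sa;

    /* same shape as the kernel's __guestcopy() of guest_fpregs.fprs */
    memcpy((char *)&sa + offsetof(struct save_area_sketch, fp_regs),
           fprs, sizeof(fprs));

    printf("fp_regs at offset %zu, %zu bytes\n",
           offsetof(struct save_area_sketch, fp_regs), sizeof(fprs));
    return 0;
}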

--- 58 unchanged lines hidden ---

     return r;
 }
 
 long kvm_arch_vcpu_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
 {
     struct kvm_vcpu *vcpu = filp->private_data;
     void __user *argp = (void __user *)arg;
-    int idx;
     long r;
 
     switch (ioctl) {
     case KVM_S390_INTERRUPT: {
         struct kvm_s390_interrupt s390int;
 
         r = -EFAULT;
         if (copy_from_user(&s390int, argp, sizeof(s390int)))
             break;
         r = kvm_s390_inject_vcpu(vcpu, &s390int);
         break;
     }
     case KVM_S390_STORE_STATUS:
-        idx = srcu_read_lock(&vcpu->kvm->srcu);
         r = kvm_s390_vcpu_store_status(vcpu, arg);
-        srcu_read_unlock(&vcpu->kvm->srcu, idx);
         break;
     case KVM_S390_SET_INITIAL_PSW: {
         psw_t psw;
 
         r = -EFAULT;
         if (copy_from_user(&psw, argp, sizeof(psw)))
             break;
         r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);

--- 195 unchanged lines hidden ---
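One behavioural difference hides in the KVM_S390_STORE_STATUS case: the "-"
side brackets kvm_s390_vcpu_store_status() with the kvm->srcu read lock,
since storing status writes guest memory through the memslots; the "+" side
calls it unlocked. From userspace the ioctl is invoked identically either
way. A hedged sketch, assuming an already-created s390 vcpu fd:

/* store_status.c - userspace sketch */
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask KVM to store the vcpu's architected status at a guest absolute
 * address; the kernel also understands special anchor values for this
 * ioctl (e.g. KVM_S390_STORE_STATUS_NOADDR), availability depending on
 * your headers. */
int store_status_at(int vcpu_fd, unsigned long addr)
{
    return ioctl(vcpu_fd, KVM_S390_STORE_STATUS, addr);
}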