xref: /openbmc/linux/arch/powerpc/kvm/book3s_hv_tm.c (revision 31af04cd)
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

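/*
 * Emulate the effect of a transaction failure on the guest's TEXASR
 * and TFIAR, approximating what the hardware would record when a
 * transaction fails with the given failure cause.
 */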
static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	u64 texasr, tfiar;
	u64 msr = vcpu->arch.shregs.msr;

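	/*
	 * TFIAR gets the address of the failing instruction, with its
	 * low bit set for problem state; the failure cause is recorded
	 * in the top byte of TEXASR.
	 */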
	tfiar = vcpu->arch.regs.nip & ~0x3ull;
	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
	if (MSR_TM_SUSPENDED(msr))
		texasr |= TEXASR_SUSP;
	if (msr & MSR_PR) {
		texasr |= TEXASR_PR;
		tfiar |= 1;
	}
	vcpu->arch.tfiar = tfiar;
	/* Preserve ROT and TL fields of existing TEXASR */
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated.  The
 * instruction image is in vcpu->arch.emul_inst.  If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct.  The CPU is in virtual mode in
 * host context.
 */
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	u64 newmsr, bescr;
	int ra, rs;

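	/*
	 * Decode on the primary opcode plus the extended opcode and
	 * Rc/LK fields, which is what the 0xfc0007ff mask selects.
	 */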
	switch (instr & 0xfc0007ff) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
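		/*
		 * rfid loads MSR from SRR1 and NIP from SRR0, and also
		 * modifies CFAR.  regs.nip has already been advanced past
		 * the rfid, so nip - 4 is the address of the rfid itself.
		 */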
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return RESUME_GUEST;

	case PPC_INST_RFEBB:
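		/* check for PR=1 and arch 2.06 bit set in PCR */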
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
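		/*
		 * The top byte of the FSCR is the interrupt cause (IC)
		 * field; setting it to FSCR_EBB_LG tells the guest which
		 * facility triggered the facility unavailable interrupt.
		 */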
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_EBB_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
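		/*
		 * BESCR bits 31:30 hold the transaction state at the time
		 * of the event-based branch, encoded like MSR[TS]; 0b10
		 * here means rfebb is requesting a return to transactional
		 * state.  The S field of the instruction (bit 11) supplies
		 * the new value of BESCR[GE].
		 */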
		bescr = vcpu->arch.bescr;
		/* expect to see a S->T transition requested */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       ((bescr >> 30) & 3) == 2));
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
		return RESUME_GUEST;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
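		/* the RS field (bits 25:21) names the source GPR */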
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		/* check this is a Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return RESUME_GUEST;

	case PPC_INST_TSR:
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/*
		 * Set CR0 to indicate the previous transactional state:
		 * CR0 <- 0 || MSR[TS] || 0.
		 */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		/* L=1 => tresume, L=0 => tsuspend */
		if (instr & (1 << 21)) {
			if (MSR_TM_SUSPENDED(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		} else {
			if (MSR_TM_TRANSACTIONAL(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		return RESUME_GUEST;

	case PPC_INST_TRECLAIM:
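		/*
		 * treclaim. aborts the transaction and copies the
		 * checkpointed register values into the working registers.
		 */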
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If no transaction active, generate TM bad thing */
		if (!MSR_TM_ACTIVE(msr)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}
		/* If failure was not previously recorded, recompute TEXASR */
		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
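			/*
			 * The failure cause is the low byte of GPR[RA]
			 * (RA = 0 means no cause register was supplied).
			 */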
			ra = (instr >> 16) & 0x1f;
			if (ra)
				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
			emulate_tx_failure(vcpu, ra);
		}

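		/* copy the checkpointed state into the working registers */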
		copy_from_checkpoint(vcpu);

		/*
		 * Set CR0 to indicate the previous transactional state:
		 * CR0 <- 0 || MSR[TS] || 0.
		 */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		return RESUME_GUEST;

	case PPC_INST_TRECHKPT:
		/* XXX do we need to check for PR=0 here? */
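		/*
		 * trechkpt. takes the current register state and makes it
		 * the checkpointed state, restoring a checkpoint that was
		 * reclaimed earlier.
		 */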
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If transaction active or TEXASR[FS] = 0, bad thing */
		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}

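		/* make the vcpu's current register state the checkpointed state */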
		copy_to_checkpoint(vcpu);

		/*
		 * Set CR0 to indicate the previous transactional state:
		 * CR0 <- 0 || MSR[TS] || 0.
		 */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		return RESUME_GUEST;
	}

	/* What should we do here? We didn't recognize the instruction */
	WARN_ON_ONCE(1);
	return RESUME_GUEST;
}