xref: /openbmc/linux/arch/powerpc/kvm/book3s_hv_tm.c (revision ac5f3136)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

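/*
 * Fill TFIAR and TEXASR as the hardware would on a transaction failure:
 * TFIAR gets the address of the failing instruction, with MSR[PR]
 * recorded in its low bit, and TEXASR gets the failure cause in its
 * top byte along with the abort, failure-summary and exact flags.
 */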
static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	u64 texasr, tfiar;
	u64 msr = vcpu->arch.shregs.msr;

	tfiar = vcpu->arch.regs.nip & ~0x3ull;
	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
		texasr |= TEXASR_SUSP;
	if (msr & MSR_PR) {
		texasr |= TEXASR_PR;
		tfiar |= 1;
	}
	vcpu->arch.tfiar = tfiar;
	/* Preserve ROT and TL fields of existing TEXASR */
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated.  The
 * instruction image is in vcpu->arch.emul_inst.  If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct.  The CPU is in virtual mode in
 * host context.
 */
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	u64 newmsr, bescr;
	int ra, rs;

	/*
	 * The TM softpatch interrupt sets NIP to the instruction following
	 * the faulting instruction, which is not executed. Rewind nip to the
	 * faulting instruction so it looks like a normal synchronous
	 * interrupt, then update nip in the places where the instruction is
	 * emulated.
	 */
	vcpu->arch.regs.nip -= 4;

	/*
	 * rfid, rfebb, and mtmsrd encode bit 31 = 0, since it is a reserved
	 * bit in these instructions; masking out bit 31 therefore leaves them
	 * unchanged. For treclaim., tsr., and trechkpt., a form with bit
	 * 31 = 0 is an invalid form per the ISA, but the P9 User Manual,
	 * section 4.6.10 "Book II Invalid Forms", specifically states that
	 * ignoring bit 31 is an acceptable way to handle these invalid forms.
	 * Moreover, for emulation purposes both forms (with and without bit
	 * 31 set) can generate a softpatch interrupt, so both are handled
	 * below and behave the same way.
	 */
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for an Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return RESUME_GUEST;

	case PPC_INST_RFEBB:
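		/*
		 * rfebb does not exist in arch 2.06, so for a guest running
		 * in 2.06 compatibility mode a problem-state rfebb must look
		 * like an illegal instruction.
		 */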
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
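			/* record EBB in the top-byte interrupt-cause field of the HFSCR */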
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		bescr = vcpu->arch.bescr;
		/* expect to see an S->T transition requested */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       ((bescr >> 30) & 3) == 2));
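		/* bit 11 is the rfebb S field, which supplies the new BESCR[GE] value */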
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip;
		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
		return RESUME_GUEST;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		/* check this is an Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* Set CR0 to indicate previous transactional state */
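		/* CR0 <- 0b0 || MSR[TS] || 0, i.e. 0b0100 if transactional, 0b0010 if suspended */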
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		/* L=1 => tresume, L=0 => tsuspend */
		if (instr & (1 << 21)) {
			if (MSR_TM_SUSPENDED(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		} else {
			if (MSR_TM_TRANSACTIONAL(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If no transaction active, generate TM bad thing */
		if (!MSR_TM_ACTIVE(msr)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}
		/* If failure was not previously recorded, recompute TEXASR */
		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
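			/* treclaim. takes the failure cause from the low byte of (RA); RA = 0 means cause 0 */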
			ra = (instr >> 16) & 0x1f;
			if (ra)
				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
			emulate_tx_failure(vcpu, ra);
		}

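		/* treclaim. makes the checkpointed state the live register state */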
		copy_from_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
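		/* treclaim. always leaves the thread in non-transactional state */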
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If transaction active or TEXASR[FS] = 0, bad thing */
		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}

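		/* trechkpt. takes the current live state as the new checkpointed state */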
		copy_to_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
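		/* trechkpt. leaves the thread in suspended state */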
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;
	}

	/* What should we do here? We didn't recognize the instruction */
	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
	pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation\n", instr);

	return RESUME_GUEST;
}