// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

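/*
 * Build TEXASR and TFIAR values reflecting a transaction failure with
 * the given cause, roughly as the hardware would record them: TFIAR
 * gets the failing NIP (with the low bit set if the guest was in
 * problem state), and TEXASR gets the cause in its top byte together
 * with the abort, failure-summary and exact bits, plus TEXASR_SUSP
 * and/or TEXASR_PR as appropriate.
 */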
static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	u64 texasr, tfiar;
	u64 msr = vcpu->arch.shregs.msr;

	tfiar = vcpu->arch.regs.nip & ~0x3ull;
	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
		texasr |= TEXASR_SUSP;
	if (msr & MSR_PR) {
		texasr |= TEXASR_PR;
		tfiar |= 1;
	}
	vcpu->arch.tfiar = tfiar;
	/* Preserve ROT and TL fields of existing TEXASR */
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated.  The
 * instruction image is in vcpu->arch.emul_inst.  If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct.  The CPU is in virtual mode in
 * host context.
 */
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	u64 newmsr, bescr;
	int ra, rs;

	/*
	 * The TM softpatch interrupt sets NIP to the instruction following
	 * the faulting instruction, which is not executed. Rewind nip to the
	 * faulting instruction so it looks like a normal synchronous
	 * interrupt, then update nip in the places where the instruction is
	 * emulated.
	 */
	vcpu->arch.regs.nip -= 4;

	/*
	 * rfid, rfebb, and mtmsrd encode bit 31 = 0, since it is a reserved
	 * bit in these instructions, so masking bit 31 out doesn't change
	 * them. For treclaim., tsr., and trechkpt., a form with bit 31 = 0 is
	 * an invalid form per the ISA; however, the P9 User's Manual, section
	 * 4.6.10 "Book II Invalid Forms", states that ignoring bit 31 is an
	 * acceptable way to handle these invalid forms. Moreover, for
	 * emulation purposes both forms (with and without bit 31 set) can
	 * generate a softpatch interrupt, so both are handled below for these
	 * instructions and behave the same way.
	 */
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return RESUME_GUEST;

	case PPC_INST_RFEBB:
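		/* check for PR=1 and arch 2.06 bit set in PCR */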
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
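			/*
			 * EBB is disabled by the hypervisor for this vCPU:
			 * record the facility number in the interrupt-cause
			 * field (top byte) of HFSCR and hand the exit back to
			 * the host as a hypervisor facility unavailable
			 * interrupt.
			 */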
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		bescr = vcpu->arch.bescr;
		/* expect to see a S->T transition requested */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       ((bescr >> 30) & 3) == 2));
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip;
		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
		return RESUME_GUEST;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
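		/* RS is instruction bits 6:10 (ISA numbering), i.e. bits 25:21 of the image */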
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		/* check this is a Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
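			/*
			 * TM is disabled at the hypervisor level: report the
			 * cause in the top byte of HFSCR and hand the exit
			 * back to the host as a hypervisor facility
			 * unavailable interrupt.
			 */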
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* Set CR0 to indicate previous transactional state */
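		/* CR0 <- 0b0 || MSR[TS] || 0 (0b01 = suspended, 0b10 = transactional) */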
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		/* L=1 => tresume, L=0 => tsuspend */
		if (instr & (1 << 21)) {
			if (MSR_TM_SUSPENDED(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		} else {
			if (MSR_TM_TRANSACTIONAL(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If no transaction active, generate TM bad thing */
		if (!MSR_TM_ACTIVE(msr)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}
		/* If failure was not previously recorded, recompute TEXASR */
		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
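			/* The low byte of (RA), if RA != 0, supplies the failure cause */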
			ra = (instr >> 16) & 0x1f;
			if (ra)
				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
			emulate_tx_failure(vcpu, ra);
		}

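		/* treclaim. copies the checkpointed register values into the working registers */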
		copy_from_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If transaction active or TEXASR[FS] = 0, bad thing */
		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}

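		/* trechkpt. loads the current register values as the new checkpoint */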
		copy_to_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;
	}

	/* What should we do here? We didn't recognize the instruction */
	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
	pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation\n", instr);

	return RESUME_GUEST;
}