// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"
#include "book3s_hv.h"

/*
 * The hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
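/*
 * For example, booting with "kvm_cma_resv_ratio=10" on the kernel
 * command line reserves 10% of system memory for the HPT CMA area
 * instead of the default 5%.
 */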

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
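	/*
	 * The check above enforces the minimum HPT allocation of one CMA
	 * chunk, i.e. 256 KiB (KVM_CMA_CHUNK_ORDER = 18).
	 */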

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need the CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}
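
/*
 * As a worked example: with the default 5% ratio, a machine with
 * 64 GiB of memory reserves roughly 3.2 GiB of CMA, declared with
 * 256 KiB alignment to match HPT_ALIGN_PAGES.
 */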

/*
 * Real-mode H_CONFER implementation.
 * We check whether we are the only vcpu of this virtual core that is
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
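	/*
	 * Spin for at most 10 microseconds of timebase (the budget set
	 * in 'stop' above), or until the vcore starts exiting, waiting
	 * for all other running threads to cede or confer.
	 */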
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with cpus_read_lock/unlock().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	cpus_read_lock();
	atomic_inc(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	cpus_read_lock();
	atomic_dec(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
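
/*
 * Note: because hv_vm_count is only changed under cpus_read_lock(), a
 * CPU-hotplug callback running with cpus_write_lock() held sees a
 * kvm_hv_mode_active() value that cannot change underneath it.
 */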

extern int hcall_real_table[], hcall_real_table_end[];

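/*
 * hcall numbers are multiples of 4, so the real-mode hcall table is
 * indexed by the hcall number divided by 4; a nonzero entry means a
 * real-mode handler exists for that hcall.
 */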
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return ppc_md.get_random_seed != NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

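/*
 * H_RANDOM: the random value is returned to the guest in GPR4, per the
 * hcall return-value convention.
 */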
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
	if (ppc_md.get_random_seed &&
	    ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8, use msgsnd for IPIs to threads in the same core. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this on XIVE */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
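	/*
	 * 'active' is a bitmap of threads in this vcore: bit n
	 * corresponds to physical cpu vc->pcpu + n.
	 */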
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);
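	/*
	 * The cmpxchg loop atomically ORs our exit bit into the map;
	 * 'ee' holds the value seen before our update, so its 0xff00
	 * bits tell us whether another thread initiated the exit first.
	 */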

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and, if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails, or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt.  We earlier saved a
 * copy of the XIRR in the PACA; it will be picked up by the host ICP
 * driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if a passthrough interrupt needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
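	/*
	 * Each iteration consumes one pending interrupt; when several
	 * are pending, the largest return value wins, so "host must
	 * handle" (positive) takes precedence over "handled here"
	 * (negative).
	 */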
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/*
	 * We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
		if (unlikely(host_ipi != 0)) {
			/*
			 * We raced with the host; we need to resend
			 * that IPI, bummer.
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/* Guest must always run with ME enabled, HV disabled. */
	msr = (msr | MSR_ME) & ~MSR_HV;

	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	__kvmppc_set_msr_hv(vcpu, msr);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}
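	/*
	 * For example, with AIL=3 an external interrupt (vector 0x500)
	 * is delivered at 0xc000000000004500 with relocation enabled.
	 */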

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	__kvmppc_set_msr_hv(vcpu, new_msr);
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}
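/*
 * Flush the whole guest TLB with tlbiel, one set at a time: 'rb'
 * selects IS=2 plus the set number, the loop walks all
 * kvm->arch.tlb_sets sets, and the final ptesync waits for the
 * invalidations to complete.
 */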
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	for (set = 0; set < kvm->arch.tlb_sets; ++set) {
		/* R=0 PRS=0 RIC=0 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (0), "i" (0), "i" (0),
			       "r" (0) : "memory");
		rb += PPC_BIT(51);	/* increment set number */
	}
	asm volatile("ptesync": : :"memory");
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
{
	if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);