/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/asm-prototypes.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
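
/*
 * Usage note (illustrative): the reservation ratio can be overridden on
 * the kernel command line, e.g. "kvm_cma_resv_ratio=10" to reserve 10%
 * of memory instead of the default 5%.
 */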

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
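
/*
 * Illustrative pairing (a sketch, not code from this file): a caller
 * allocating an HPT of 2^order bytes would do something like
 *
 *	struct page *page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
 *	...
 *	kvm_release_hpt(page, 1ul << (order - PAGE_SHIFT));
 *
 * The VM_BUG_ON above guards against requests smaller than roughly one
 * KVM_CMA_CHUNK_ORDER-sized chunk.
 */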

/**
 * kvm_cma_reserve() - reserve area for kvm hash page table
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need a CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
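
/*
 * Worked example (illustrative): on a 16 GiB machine with the default
 * kvm_cma_resv_ratio of 5, the loop above counts 16 GiB worth of page
 * frames, so selected_size = 16 GiB * 5 / 100 = ~819 MiB, declared as
 * a CMA area in 256 KiB (KVM_CMA_CHUNK_ORDER) chunks with 256 KiB
 * (HPT_ALIGN_PAGES) alignment.
 */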

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}
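
/*
 * Illustrative example of the exit test above: with four threads in
 * the guest (threads_running = 0x0f), threads 1 and 3 napping
 * (threads_ceded = 0x0a) and threads 0 and 2 conferring
 * (threads_conferring = 0x05), 0x0a | 0x05 == 0x0f, so no thread is
 * still doing useful guest work and we return H_TOO_HARD to yield via
 * the virtual-mode handler.
 */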

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
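
/*
 * Assumed usage sketch (illustrative, not code from this file): code
 * that must not run concurrently with HV guests, such as a CPU
 * onlining or core-reconfiguration path, can bail out early:
 *
 *	if (kvm_hv_mode_active())
 *		return -EBUSY;
 */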

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
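
/*
 * Note on the division above: hypercall numbers are multiples of 4,
 * and hcall_real_table holds one entry per hcall, so cmd / 4 indexes
 * the table.  E.g. H_CONFER (0x118) maps to entry 0x118 / 4 = 0x46.
 */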

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}
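
/*
 * stbcix is a cache-inhibited store-byte-indexed instruction, so the
 * write above goes straight to the device even with the MMU off; this
 * is why rm_writeb() is safe in real mode, where a normal writeb() to
 * an ioremapped address would not be.
 */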

/*
 * Send an interrupt or message to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}
	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
}
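
/*
 * Illustrative reading of the msgsnd payload above: the low bits of
 * msg select the doorbell target, a thread number within the core on
 * POWER8 or a hardware cpu id on POWER9.  E.g. for thread 3 of the
 * local core on POWER8, "msg |= cpu_thread_in_core(cpu)" sets those
 * bits to 3.
 */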

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}
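
/*
 * Illustrative layout of entry_exit_map used above: bits 0-7 form the
 * entry map (threads currently in the guest) and bits 8-15 the exit
 * map.  E.g. for ptid 2, "me = 0x100 << 2" sets bit 10; seeing
 * (ee >> 8) != 0 beforehand means another thread already initiated
 * the exit and sent the IPIs, so we can return at once.
 */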

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++)  {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity, i.e. the XIVE value in the ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if a passthrough interrupt needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
long kvmppc_read_intr(void)
{
	unsigned long xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (unlikely(!xics_phys))
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	xirr = _lwzcix(xics_phys + XICS_XIRR);
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/*
	 * We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		_stbcix(xics_phys + XICS_MFRR, 0xff);
		_stwcix(xics_phys + XICS_XIRR, xirr);
		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/*
			 * We raced with the host; we need to resend
			 * that IPI, bummer.
			 */
			_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr);
}
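
/*
 * Illustrative XIRR decoding used above: the 32-bit XIRR presented by
 * the ICP holds the CPPR in its top 8 bits and the interrupt source
 * (XISR) in the low 24 bits, hence "h_xirr & 0xffffff"; a source
 * equal to XICS_IPI identifies an inter-processor interrupt.
 */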