/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/asm-prototypes.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (those with CPU_FTR_ARCH_206)
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
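/*
 * Illustrative arithmetic: with 4k pages (PAGE_SHIFT = 12) HPT_ALIGN_PAGES
 * is (1 << 18) >> 12 = 64 pages; with 64k pages it is 4 pages.  Either way
 * the alignment corresponds to 256k, the minimum hash page table size.
 */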
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
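/*
 * Example usage (illustrative): booting with "kvm_cma_resv_ratio=10" on
 * the kernel command line reserves 10% of memory instead of the default 5%.
 */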

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early (memblock) allocator.
 * It should be called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
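
/*
 * Illustrative arithmetic for the reservation above: on a 16 GiB machine
 * with the default kvm_cma_resv_ratio of 5, selected_size comes to roughly
 * 819 MiB, declared as a CMA area managed in 256k (KVM_CMA_CHUNK_ORDER)
 * granules and aligned to HPT_ALIGN_PAGES.
 */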

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}
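
/*
 * Note on the bound above: get_tb() counts timebase ticks, so the
 * "10 * tb_ticks_per_usec" term makes the loop spin for at most about
 * 10 microseconds before giving up and returning H_SUCCESS (don't yield).
 */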

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
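
/*
 * Illustrative caller (an assumption, not part of this file): platform
 * SMP code can consult kvm_hv_mode_active() when deciding whether a
 * secondary thread may be onlined, e.g.:
 *
 *	if (cpu_thread_in_core(cpu) != 0 && kvm_hv_mode_active())
 *		return 0;	(refuse to online this thread)
 */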

extern int hcall_real_table[], hcall_real_table_end[];

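/*
 * hcall numbers are multiples of 4, so hcall_real_table (defined in
 * book3s_hv_rmhandlers.S) is indexed by hcall number / 4; a non-zero
 * entry means the hcall has a real-mode handler.
 */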
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}
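
/*
 * Note on rm_writeb(): stbcix is a cache-inhibited store-byte to a real
 * (physical) address, so it can be used for MMIO while translation is
 * off; an ordinary writeb() is not usable here because we are in real mode.
 */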

/*
 * Send an interrupt or message to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}
	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	if (xics_phys)
		rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
	else
		opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
				     IPI_PRIORITY);
}
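
/*
 * Doorbell message layout used above (summary): PPC_DBELL_TYPE() selects
 * the server doorbell message type, and the low-order bits carry the
 * target tag: the thread number within the core on POWER8, or the full
 * hardware cpu id on POWER9, matching the two msgsnd cases above.
 */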

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}
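
/*
 * Note: under dynamic micro-threading each subcore runs its own vcore,
 * so the loop above requests an exit from every subcore's master vcore
 * by setting VCORE_EXIT_REQ, and only IPIs a subcore's threads if no
 * exit had been requested there already.
 */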

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++)  {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
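
/*
 * The pairing write barrier is assumed to live in the virtual-mode code
 * that appends entries to pimap->mapped[], ordering the GSI/irq_desc
 * stores before the r_hwirq store (and before the n_mapped update) so
 * that the smp_rmb() above is sufficient for the reader.
 */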

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if there is a passthrough interrupt that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}
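
/*
 * The loop above keeps the highest (numerically largest) non-zero return
 * value seen, so a host-IPI or passthrough indication is not lost if a
 * later pass finds nothing pending, and it keeps re-reading for as long
 * as the OPAL EOI path reports that another interrupt is still pending.
 */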

static long kvmppc_read_one_intr(bool *again)
{
	unsigned long xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (!xics_phys) {
		/* Use OPAL to read the XIRR */
		rc = opal_rm_int_get_xirr(&xirr, false);
		if (rc < 0)
			return 1;
	} else {
		xirr = _lwzcix(xics_phys + XICS_XIRR);
	}

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
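	/*
	 * XICS XIRR layout: bits 31-24 hold the CPPR, bits 23-0 the
	 * XISR source number, hence the 0xffffff mask above.
	 */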
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/*
	 * We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		if (xics_phys) {
			_stbcix(xics_phys + XICS_MFRR, 0xff);
			_stwcix(xics_phys + XICS_XIRR, xirr);
		} else {
			opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_rm_int_eoi(h_xirr);
			/* If rc > 0, there is another interrupt pending */
			*again = rc > 0;
		}

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/*
			 * We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
			else
				opal_rm_int_set_mfrr(hard_smp_processor_id(),
						     IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}