/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer cpus (those with CPU_FTR_ARCH_206)
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */

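/*
 * Illustrative numbers for the defines above (an editorial sketch, not
 * part of the original file): KVM_CMA_CHUNK_ORDER of 18 means 256 KiB
 * chunks.  With 4 KiB pages (PAGE_SHIFT = 12), HPT_ALIGN_PAGES is
 * (1 << 18) >> 12 = 64 pages; with 64 KiB pages (PAGE_SHIFT = 16) it
 * is (1 << 18) >> 16 = 4 pages - the same 256 KiB byte alignment
 * either way.
 */
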
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

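/*
 * Hypothetical calling sketch for the pair above (illustrative only;
 * the variable names are assumptions, not taken from this file): a
 * hash page table of 2^order bytes is backed by 2^(order - PAGE_SHIFT)
 * pages from the CMA pool, and the VM_BUG_ON in kvm_alloc_hpt()
 * insists that this is at least one 256 KiB chunk:
 *
 *	struct page *page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
 *
 *	if (page)
 *		hpt = (unsigned long)page_address(page);
 *	...
 *	kvm_release_hpt(page, 1ul << (order - PAGE_SHIFT));
 */
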
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

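/*
 * Illustrative walk-through of the check above (the concrete values
 * are assumptions for the example, not taken from this file): suppose
 * a 4-thread vcore with VCORE_ENTRY_MAP(vc) = 0b1111 and thread 0
 * executing H_CONFER, so bit 0 is set in conferring_threads.  With
 * threads 1 and 2 napping (threads_ceded = 0b0110) but thread 3 still
 * running, the union 0b0111 != 0b1111 and we keep spinning; if thread
 * 3 cedes or confers within the ~10us window, the union covers the
 * entry map and H_TOO_HARD sends this vcpu up to the virtual-mode
 * H_CONFER implementation.  Otherwise H_SUCCESS tells it not to yield.
 */
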
/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

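/*
 * Worked example for the lookup above (illustrative; the sample
 * hypercall is an assumption, not taken from this file): PAPR
 * hypercall numbers are multiples of 4, so e.g. H_ENTER (0x08)
 * divides down to index 2, and hcall_real_table[2] is nonzero exactly
 * when the real-mode handler table in book3s_hv_rmhandlers.S provides
 * a fast path for that hypercall.
 */
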
int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

/* Real-mode store of one byte to a cache-inhibited (MMIO) address */
static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt or message to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

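/*
 * Illustrative note on vcore->entry_exit_map (the sample values are
 * assumptions for the example, not taken from this file): the low 8
 * bits form the map of threads that have entered the guest and the
 * 0xff00 bits form the map of threads exiting.  With ptid 2,
 * kvmhv_commence_exit() below computes me = 0x100 << 2 = 0x400; if
 * the map was 0x000f (four threads in, none exiting yet), the cmpxchg
 * loop moves it to 0x040f and, since (0x000f >> 8) == 0, this thread
 * knows it is first out and interrupts threads 0, 1 and 3
 * (ee & ~(1 << 2) = 0x000b).
 */
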
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

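/*
 * Hypothetical sketch of the matching writer (illustrative only; the
 * actual writer lives in the virtual-mode HV code and may differ):
 *
 *	map = &pimap->mapped[pimap->n_mapped];
 *	map->r_hwirq = hwirq;
 *	map->v_hwirq = gsi;
 *	map->desc = desc;
 *	smp_wmb();		// order the field stores ...
 *	pimap->n_mapped++;	// ... before publishing the entry
 *
 * The smp_rmb() above pairs with that smp_wmb(): a reader that finds
 * a matching r_hwirq also sees the fields stored before the barrier.
 */
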
/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
{
	return 1;
}
#endif

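/*
 * Illustrative note on the return contract (inferred from the list
 * above kvmppc_read_intr() below, not stated here in the original):
 * kvmppc_check_passthru() folds into kvmppc_read_intr()'s return
 * value - 1 means "not ours, let the host handle it", while the value
 * from kvmppc_deliver_irq_passthru() reports a passthrough interrupt
 * that was either handled here (-2) or needs completion in the host
 * (2).
 */
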
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if there is a passthrough interrupt that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */

long kvmppc_read_intr(void)
{
	unsigned long xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (unlikely(!xics_phys))
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	xirr = _lwzcix(xics_phys + XICS_XIRR);
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		_stbcix(xics_phys + XICS_MFRR, 0xff);
		_stwcix(xics_phys + XICS_XIRR, xirr);
		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr);
}
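
/*
 * Worked example of the XIRR decoding in kvmppc_read_intr() above
 * (the sample value is an assumption, not taken from this file): XIRR
 * holds the 8-bit CPPR in the top byte and the 24-bit XISR source
 * number in the low bits.  For a hypothetical register value of
 * 0x04000002, the byte-reversed save plus be32_to_cpu() fetch yields
 * h_xirr = 0x04000002 on both BE and LE hosts, so
 * xisr = h_xirr & 0xffffff = 2, which is XICS_IPI and takes the IPI
 * branch; any other nonzero source number falls through to
 * kvmppc_check_passthru().
 */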