/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

static bool in_realmode(void)
{
	return !(mfmsr() & MSR_IR);
}

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
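/*
 * Example of the parameter registered above (illustrative, not from the
 * original source): booting with
 *
 *	kvm_cma_resv_ratio=10
 *
 * on the kernel command line reserves 10% of memory for the hash page
 * table CMA area instead of the default 5%.
 */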
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
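/*
 * Worked example of the constraints above (added for illustration): the
 * CMA area is managed in 1 << KVM_CMA_CHUNK_ORDER = 256 KiB chunks, and
 * an HPT must be aligned to HPT_ALIGN_PAGES, i.e. 256 KiB.  With 4 KiB
 * pages that is 64 pages; with 64 KiB pages it is 4 pages.  So on a
 * 4 KiB-page kernel, kvm_alloc_hpt(64) requests a 256 KiB block from
 * kvm_cma, aligned to 256 KiB, and the VM_BUG_ON fires for any request
 * smaller than one chunk.
 */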
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need a CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}
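/*
 * Worked example for the check above (hypothetical 4-thread vcore):
 * threads 0 and 2 are in the guest, so VCORE_ENTRY_MAP(vc) == 0b0101.
 * Thread 2 has ceded (napping_threads == 0b0100) and thread 0 is here
 * conferring (conferring_threads == 0b0001), so
 *
 *	(threads_ceded | threads_conferring) == 0b0101 == threads_running
 *
 * and we return H_TOO_HARD to pop up to the virtual-mode H_CONFER
 * handler.  If thread 2 were still running, the condition would not
 * hold and we would spin until the 10us timeout, then return H_SUCCESS
 * to go straight back into the guest.
 */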
/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
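/*
 * Illustration of the lookup above: PAPR hcall numbers are multiples of
 * 4, so hcall_real_table is indexed by hcall number / 4.  For example,
 * H_CEDE (0xe0) maps to index 0x38; a non-zero entry there means the
 * hcall has a real-mode handler in book3s_hv_rmhandlers.S.
 */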
int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	if (!in_realmode())
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
	else if (xics_phys)
		rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
	else
		opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
				     IPI_PRIORITY);
}
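/*
 * Summary of the delivery paths above, fastest first:
 *
 *	POWER9:		msgsnd, addressed by hardware cpu id, works for
 *			any thread in the system
 *	POWER8:		msgsnd, but only to threads within the sender's
 *			own core
 *	otherwise:	raise an IPI by writing IPI_PRIORITY to the
 *			target's XICS MFRR, via a cache-inhibited store
 *			if we have the MMIO address, or an OPAL call if
 *			not (real- or virtual-mode variant as appropriate)
 */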
/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}
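/*
 * Layout note for entry_exit_map as manipulated above: the low 8 bits
 * are a bitmap of threads that have entered the guest, and the high 8
 * bits a bitmap of threads that have commenced exiting, hence
 * me = 0x100 << ptid and the (ee >> 8) tests.  For example
 * (illustrative), a value of 0x0105 means threads 0 and 2 entered the
 * guest and thread 0 has already commenced exiting.
 */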
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
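/*
 * Sketch of the store ordering that the lockless reader above relies
 * on (the actual writer lives in the passthrough mapping code in
 * book3s_hv.c; the field names and exact sequence here are
 * illustrative):
 *
 *	map->v_hwirq = ...;		<- fill in payload fields first
 *	map->desc = ...;
 *	smp_wmb();			<- pairs with smp_rmb() above
 *	map->r_hwirq = hwirq;		<- publish the entry last
 *	pimap->n_mapped++;		<- then grow the array
 */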
/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if it is a PCI passthrough interrupt that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there was a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}
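/*
 * Example of the aggregation in the loop above (illustrative): if the
 * first pass consumes a guest wakeup IPI (rc == -1) but finds another
 * interrupt already pending (*again set), and the second pass then sees
 * a host interrupt (rc == 1), ret ends up as 1: the pending host
 * interrupt outranks the already-cleared IPI.
 */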
static long kvmppc_read_one_intr(bool *again)
{
	unsigned long xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	/* See if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!in_realmode())
		rc = opal_int_get_xirr(&xirr, false);
	else if (!xics_phys)
		rc = opal_rm_int_get_xirr(&xirr, false);
	else
		xirr = _lwzcix(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* If nothing is pending in the ICP */
	if (!xisr)
		return 0;

	/*
	 * We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (!in_realmode()) {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		} else if (xics_phys) {
			_stbcix(xics_phys + XICS_MFRR, 0xff);
			_stwcix(xics_phys + XICS_XIRR, xirr);
		} else {
			opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_rm_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of the above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/*
			 * We raced with the host; we need to resend
			 * that IPI, bummer.
			 */
			if (!in_realmode())
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			else if (xics_phys)
				_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
			else
				opal_rm_int_set_mfrr(hard_smp_processor_id(),
						     IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}
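/*
 * Note on the XICS sequence used above: the load from XICS_XIRR both
 * returns and acknowledges the highest-priority pending interrupt;
 * storing 0xff to XICS_MFRR resets the IPI message register; and
 * storing the original XIRR value back performs the EOI.  The
 * opal_int_ and opal_rm_int_ calls are the firmware equivalents used
 * when the ICP registers are not directly accessible.
 */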