// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static int afu_irq_range_start(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 1;
	return 0;
}

static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
	ctx->dsisr = dsisr;
	ctx->dar = dar;
	schedule_work(&ctx->fault_work);
	return IRQ_HANDLED;
}
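/*
 * Handle a PSL interrupt on a POWER9 (PSL9) slice: decode the DSISR,
 * defer translation faults to the context's fault worker and surface
 * slice/AFU errors to anyone waiting on the context.
 */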
irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl9_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL9_DSISR_An_TF) {
		pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL9_DSISR_An_PE)
		return cxl_ops->handle_psl_slice_error(ctx, dsisr,
						irq_info->errstat);
	if (dsisr & CXL_PSL9_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = true;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL9_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}
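/*
 * Handle a PSL interrupt on a POWER8 (PSL8) slice. Same structure as the
 * PSL9 variant, but the POWER8 DSISR distinguishes segment misses from
 * page faults, so both are checked and deferred to the fault worker.
 */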
irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL_DSISR_An_DS) {
		/*
		 * We don't inherently need to sleep to handle this, but we do
		 * need to get a ref to the task's mm, which we can't do from
		 * irq context without the potential for a deadlock since it
		 * takes the task_lock. An alternate option would be to keep a
		 * reference to the task's mm the entire time it has cxl open,
		 * but to do that we need to solve the issue where we hold a
		 * ref to the mm, but the mm can hold a ref to the fd after an
		 * mmap preventing anything from being cleaned up.
		 */
		pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL_DSISR_An_M)
		pr_devel("CXL interrupt: PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_P)
		pr_devel("CXL interrupt: Storage protection violation\n");
	if (dsisr & CXL_PSL_DSISR_An_A)
		pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
	if (dsisr & CXL_PSL_DSISR_An_S)
		pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
	if (dsisr & CXL_PSL_DSISR_An_K)
		pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

	if (dsisr & CXL_PSL_DSISR_An_DM) {
		/*
		 * In some cases we might be able to handle the fault
		 * immediately if hash_page would succeed, but we still need
		 * the task's mm, which as above we can't get without a lock
		 */
		pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}
	if (dsisr & CXL_PSL_DSISR_An_ST)
		WARN(1, "CXL interrupt: Segment Table PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_UR)
		pr_devel("CXL interrupt: AURP PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_PE)
		return cxl_ops->handle_psl_slice_error(ctx, dsisr,
						irq_info->errstat);
	if (dsisr & CXL_PSL_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = true;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}
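/*
 * Handler for AFU-sourced interrupts: work out which per-context AFU
 * interrupt number the hardware IRQ corresponds to, set it in the
 * context's bitmap and wake up anyone waiting on the context.
 */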
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 0;
	__u16 range;
	int r;

	/*
	 * Look for the interrupt number.
	 * On bare-metal, we know range 0 only contains the PSL
	 * interrupt so we could start counting at range 1 and initialize
	 * afu_irq at 1.
	 * In a guest, range 0 also contains AFU interrupts, so it must
	 * be accounted for. Therefore we initialize afu_irq at 0 to take
	 * into account the PSL interrupt.
	 *
	 * For code readability, it just seems easier to go over all
	 * the ranges on bare-metal and guest. The end result is the same.
	 */
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
		 afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}
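/*
 * Map a hardware IRQ to a Linux virq and install the given handler.
 * Returns the virq on success, 0 on failure.
 */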
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
			 irq_handler_t handler, void *cookie, const char *name)
{
	unsigned int virq;
	int result;

	/* IRQ Domain? */
	virq = irq_create_mapping(NULL, hwirq);
	if (!virq) {
		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
		return 0;
	}

	if (cxl_ops->setup_irq)
		cxl_ops->setup_irq(adapter, hwirq, virq);

	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

	result = request_irq(virq, handler, 0, name, cookie);
	if (result) {
		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
		return 0;
	}

	return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
	free_irq(virq, cookie);
}
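/*
 * Allocate a single hardware IRQ from the adapter and map it to a virq
 * with the given handler, handing both numbers back to the caller.
 * A minimal usage sketch (my_handler, my_cookie and the name are
 * hypothetical, error handling elided):
 *
 *	rc = cxl_register_one_irq(adapter, my_handler, my_cookie,
 *				  &hwirq, &virq, "cxl-example");
 */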
int cxl_register_one_irq(struct cxl *adapter,
			 irq_handler_t handler,
			 void *cookie,
			 irq_hw_number_t *dest_hwirq,
			 unsigned int *dest_virq,
			 const char *name)
{
	int hwirq, virq;

	if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
		return hwirq;

	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
		goto err;

	*dest_hwirq = hwirq;
	*dest_virq = virq;

	return 0;

err:
	cxl_ops->release_one_irq(adapter, hwirq);
	return -ENOMEM;
}

void afu_irq_name_free(struct cxl_context *ctx)
{
	struct cxl_irq_name *irq_name, *tmp;

	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
		kfree(irq_name->name);
		list_del(&irq_name->list);
		kfree(irq_name);
	}
}
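/*
 * Allocate the IRQ ranges and the per-interrupt names for a context.
 * Names are allocated up front so a failure can be unwound before any
 * interrupt handlers have been registered.
 */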
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
	int rc, r, i, j = 1;
	struct cxl_irq_name *irq_name;
	int alloc_count;

	/*
	 * In native mode, range 0 is reserved for the multiplexed
	 * PSL interrupt. It has been allocated when the AFU was initialized.
	 *
	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
	 * and is the first interrupt from range 0. It still needs to be
	 * allocated, so bump the count by one.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		alloc_count = count;
	else
		alloc_count = count + 1;

	if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
					    alloc_count)))
		return rc;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Multiplexed PSL Interrupt */
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->irq_count = count;
	ctx->irq_bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!ctx->irq_bitmap)
		goto out;

	/*
	 * Allocate names first. If any fail, bail out before allocating
	 * actual hardware IRQs.
	 */
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			irq_name = kmalloc(sizeof(struct cxl_irq_name),
					   GFP_KERNEL);
			if (!irq_name)
				goto out;
			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
						   dev_name(&ctx->afu->dev),
						   ctx->pe, j);
			if (!irq_name->name) {
				kfree(irq_name);
				goto out;
			}
			/* Add to tail so the next lookup gets the correct order */
			list_add_tail(&irq_name->list, &ctx->irq_names);
			j++;
		}
	}
	return 0;

out:
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
	afu_irq_name_free(ctx);
	return -ENOMEM;
}
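/*
 * Wire up the handlers for the ranges allocated by afu_allocate_irqs():
 * the PSL handler for the first interrupt of range 0 (guests only),
 * cxl_irq_afu for everything else.
 */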
static void afu_register_hwirqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	struct cxl_irq_name *irq_name;
	int r, i;
	irqreturn_t (*handler)(int irq, void *data);

	/* We've allocated all memory now, so let's do the irq allocations */
	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			if (r == 0 && i == 0)
				/*
				 * The very first interrupt of range 0 is
				 * always the PSL interrupt, but we only
				 * need to connect a handler for guests,
				 * because there's one PSL interrupt per
				 * context.
				 * On bare-metal, the PSL interrupt is
				 * multiplexed and was set up when the AFU
				 * was configured.
				 */
				handler = cxl_ops->psl_interrupt;
			else
				handler = cxl_irq_afu;
			cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
				    irq_name->name);
			irq_name = list_next_entry(irq_name, list);
		}
	}
}

int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
	int rc;

	rc = afu_allocate_irqs(ctx, count);
	if (rc)
		return rc;

	afu_register_hwirqs(ctx);
	return 0;
}

void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, cookie);
		}
	}

	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

	ctx->irq_count = 0;
}
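/*
 * Decode the PSL_SERR_An slice error register and log each error bit
 * that is set.
 */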
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
{
	dev_crit(&afu->dev,
		 "PSL Slice error received. Check AFU for root cause.\n");
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	if (serr & CXL_PSL_SERR_An_afuto)
		dev_crit(&afu->dev, "AFU MMIO Timeout\n");
	if (serr & CXL_PSL_SERR_An_afudis)
		dev_crit(&afu->dev,
			 "MMIO targeted Accelerator that was not enabled\n");
	if (serr & CXL_PSL_SERR_An_afuov)
		dev_crit(&afu->dev, "AFU CTAG Overflow\n");
	if (serr & CXL_PSL_SERR_An_badsrc)
		dev_crit(&afu->dev, "Bad Interrupt Source\n");
	if (serr & CXL_PSL_SERR_An_badctx)
		dev_crit(&afu->dev, "Bad Context Handle\n");
	if (serr & CXL_PSL_SERR_An_llcmdis)
		dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
	if (serr & CXL_PSL_SERR_An_llcmdto)
		dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
	if (serr & CXL_PSL_SERR_An_afupar)
		dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
	if (serr & CXL_PSL_SERR_An_afudup)
		dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
	if (serr & CXL_PSL_SERR_An_AE)
		dev_crit(&afu->dev,
			 "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
}