/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static int afu_irq_range_start(void)
{
        if (cpu_has_feature(CPU_FTR_HVMODE))
                return 1;
        return 0;
}

static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
        ctx->dsisr = dsisr;
        ctx->dar = dar;
        schedule_work(&ctx->fault_work);
        return IRQ_HANDLED;
}
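/*
 * Both PSL interrupt handlers below run in hard interrupt context. Anything
 * that may need a reference to the task's mm (i.e. actual fault handling)
 * is deferred to process context through the context's fault_work, via
 * schedule_cxl_fault() above.
 */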
irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
        u64 dsisr, dar;

        dsisr = irq_info->dsisr;
        dar = irq_info->dar;

        trace_cxl_psl9_irq(ctx, irq, dsisr, dar);

        pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

        if (dsisr & CXL_PSL9_DSISR_An_TF) {
                pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }

        if (dsisr & CXL_PSL9_DSISR_An_PE)
                return cxl_ops->handle_psl_slice_error(ctx, dsisr,
                                                       irq_info->errstat);
        if (dsisr & CXL_PSL9_DSISR_An_AE) {
                pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

                if (ctx->pending_afu_err) {
                        /*
                         * This shouldn't happen - the PSL treats these errors
                         * as fatal and will have reset the AFU, so there's not
                         * much point buffering multiple AFU errors.
                         * OTOH if we DO ever see a storm of these come in it's
                         * probably best that we log them somewhere:
                         */
                        dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
                                            ctx->pe, irq_info->afu_err);
                } else {
                        spin_lock(&ctx->lock);
                        ctx->afu_err = irq_info->afu_err;
                        ctx->pending_afu_err = true;
                        spin_unlock(&ctx->lock);

                        wake_up_all(&ctx->wq);
                }

                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
                return IRQ_HANDLED;
        }
        if (dsisr & CXL_PSL9_DSISR_An_OC)
                pr_devel("CXL interrupt: OS Context Warning\n");

        WARN(1, "Unhandled CXL PSL IRQ\n");
        return IRQ_HANDLED;
}
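/*
 * The PSL8 handler below differs from the PSL9 one above in that the PSL8
 * DSISR distinguishes several fault types (segment miss, page fault, etc.)
 * rather than reporting a single translation fault bit, so more decoding
 * happens before the fault is deferred.
 */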
irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
        u64 dsisr, dar;

        dsisr = irq_info->dsisr;
        dar = irq_info->dar;

        trace_cxl_psl_irq(ctx, irq, dsisr, dar);

        pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

        if (dsisr & CXL_PSL_DSISR_An_DS) {
                /*
                 * We don't inherently need to sleep to handle this, but we do
                 * need to get a ref to the task's mm, which we can't do from
                 * irq context without the potential for a deadlock since it
                 * takes the task_lock. An alternate option would be to keep a
                 * reference to the task's mm the entire time it has cxl open,
                 * but to do that we need to solve the issue where we hold a
                 * ref to the mm, but the mm can hold a ref to the fd after an
                 * mmap preventing anything from being cleaned up.
                 */
                pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }

        if (dsisr & CXL_PSL_DSISR_An_M)
                pr_devel("CXL interrupt: PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_P)
                pr_devel("CXL interrupt: Storage protection violation\n");
        if (dsisr & CXL_PSL_DSISR_An_A)
                pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
        if (dsisr & CXL_PSL_DSISR_An_S)
                pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
        if (dsisr & CXL_PSL_DSISR_An_K)
                pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

        if (dsisr & CXL_PSL_DSISR_An_DM) {
                /*
                 * In some cases we might be able to handle the fault
                 * immediately if hash_page would succeed, but we still need
                 * the task's mm, which as above we can't get without a lock
                 */
                pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }
        if (dsisr & CXL_PSL_DSISR_An_ST)
                WARN(1, "CXL interrupt: Segment Table PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_UR)
                pr_devel("CXL interrupt: AURP PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_PE)
                return cxl_ops->handle_psl_slice_error(ctx, dsisr,
                                                       irq_info->errstat);
        if (dsisr & CXL_PSL_DSISR_An_AE) {
                pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

                if (ctx->pending_afu_err) {
                        /*
                         * This shouldn't happen - the PSL treats these errors
                         * as fatal and will have reset the AFU, so there's not
                         * much point buffering multiple AFU errors.
                         * OTOH if we DO ever see a storm of these come in it's
                         * probably best that we log them somewhere:
                         */
                        dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
                                            ctx->pe, irq_info->afu_err);
                } else {
                        spin_lock(&ctx->lock);
                        ctx->afu_err = irq_info->afu_err;
                        ctx->pending_afu_err = true;
                        spin_unlock(&ctx->lock);

                        wake_up_all(&ctx->wq);
                }

                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
                return IRQ_HANDLED;
        }
        if (dsisr & CXL_PSL_DSISR_An_OC)
                pr_devel("CXL interrupt: OS Context Warning\n");

        WARN(1, "Unhandled CXL PSL IRQ\n");
        return IRQ_HANDLED;
}
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
        struct cxl_context *ctx = data;
        irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
        int irq_off, afu_irq = 0;
        __u16 range;
        int r;

        /*
         * Look for the interrupt number.
         * On bare-metal, we know range 0 only contains the PSL
         * interrupt so we could start counting at range 1 and initialize
         * afu_irq at 1.
         * In a guest, range 0 also contains AFU interrupts, so it must
         * be accounted for. Therefore we initialize afu_irq at 0 to take
         * into account the PSL interrupt.
         *
         * For code readability, it just seems easier to go over all
         * the ranges on bare-metal and guest. The end result is the same.
         */
        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                irq_off = hwirq - ctx->irqs.offset[r];
                range = ctx->irqs.range[r];
                if (irq_off >= 0 && irq_off < range) {
                        afu_irq += irq_off;
                        break;
                }
                afu_irq += range;
        }
        if (unlikely(r >= CXL_IRQ_RANGES)) {
                WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
                     ctx->pe, irq, hwirq);
                return IRQ_HANDLED;
        }

        trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
        pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
                 afu_irq, ctx->pe, irq, hwirq);

        if (unlikely(!ctx->irq_bitmap)) {
                WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
                return IRQ_HANDLED;
        }
        spin_lock(&ctx->lock);
        set_bit(afu_irq - 1, ctx->irq_bitmap);
        ctx->pending_irq = true;
        spin_unlock(&ctx->lock);

        wake_up_all(&ctx->wq);

        return IRQ_HANDLED;
}
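/*
 * Illustrative walk-through of the range search in cxl_irq_afu() above,
 * using hypothetical numbers that are not from the original source. Assume
 * two ranges, offset[0] = 16 with range[0] = 1 (the PSL interrupt) and
 * offset[1] = 48 with range[1] = 3, and an incoming hwirq of 49:
 *
 *   r = 0: irq_off = 49 - 16 = 33, not within range 1, so afu_irq += 1
 *   r = 1: irq_off = 49 - 48 = 1, within range 3, so afu_irq += 1 => 2
 *
 * hwirq 49 is therefore AFU interrupt 2 for this context, and bit 1
 * (afu_irq - 1) gets set in ctx->irq_bitmap.
 */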
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
                         irq_handler_t handler, void *cookie, const char *name)
{
        unsigned int virq;
        int result;

        /* IRQ Domain? */
        virq = irq_create_mapping(NULL, hwirq);
        if (!virq) {
                dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
                return 0;
        }

        if (cxl_ops->setup_irq)
                cxl_ops->setup_irq(adapter, hwirq, virq);

        pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

        result = request_irq(virq, handler, 0, name, cookie);
        if (result) {
                dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
                return 0;
        }

        return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
        free_irq(virq, cookie);
}
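/*
 * cxl_register_one_irq() below bundles the two steps: allocate a hwirq from
 * the adapter, then map and request it via cxl_map_irq(). On success the
 * hwirq and virq are handed back through the dest_* pointers; if the
 * mapping fails, the hwirq is released again and -ENOMEM is returned.
 */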
int cxl_register_one_irq(struct cxl *adapter,
                         irq_handler_t handler,
                         void *cookie,
                         irq_hw_number_t *dest_hwirq,
                         unsigned int *dest_virq,
                         const char *name)
{
        int hwirq, virq;

        if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
                return hwirq;

        if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
                goto err;

        *dest_hwirq = hwirq;
        *dest_virq = virq;

        return 0;

err:
        cxl_ops->release_one_irq(adapter, hwirq);
        return -ENOMEM;
}

void afu_irq_name_free(struct cxl_context *ctx)
{
        struct cxl_irq_name *irq_name, *tmp;

        list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
                kfree(irq_name->name);
                list_del(&irq_name->list);
                kfree(irq_name);
        }
}
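/*
 * Note the ordering contract between afu_allocate_irqs() below and
 * afu_register_hwirqs() further down: names are appended to ctx->irq_names
 * with list_add_tail() in the same range/offset order that the registration
 * loop walks, so each hwirq picks up its matching "cxl-<afu>-pe<pe>-<n>"
 * name with a simple list_next_entry().
 */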
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
        int rc, r, i, j = 1;
        struct cxl_irq_name *irq_name;
        int alloc_count;

        /*
         * In native mode, range 0 is reserved for the multiplexed
         * PSL interrupt. It has been allocated when the AFU was initialized.
         *
         * In a guest, the PSL interrupt is not multiplexed, but per-context,
         * and is the first interrupt from range 0. It still needs to be
         * allocated, so bump the count by one.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE))
                alloc_count = count;
        else
                alloc_count = count + 1;

        if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
                                            alloc_count)))
                return rc;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                /* Multiplexed PSL Interrupt */
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }

        ctx->irq_count = count;
        ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
                                  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
        if (!ctx->irq_bitmap)
                goto out;

        /*
         * Allocate names first. If any fail, bail out before allocating
         * actual hardware IRQs.
         */
        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                for (i = 0; i < ctx->irqs.range[r]; i++) {
                        irq_name = kmalloc(sizeof(struct cxl_irq_name),
                                           GFP_KERNEL);
                        if (!irq_name)
                                goto out;
                        irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
                                                   dev_name(&ctx->afu->dev),
                                                   ctx->pe, j);
                        if (!irq_name->name) {
                                kfree(irq_name);
                                goto out;
                        }
                        /* Add to tail so the next loop gets the correct order */
                        list_add_tail(&irq_name->list, &ctx->irq_names);
                        j++;
                }
        }
        return 0;

out:
        cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
        afu_irq_name_free(ctx);
        return -ENOMEM;
}
static void afu_register_hwirqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        struct cxl_irq_name *irq_name;
        int r, i;
        irqreturn_t (*handler)(int irq, void *data);

        /* We've allocated all memory now, so let's do the irq allocations */
        irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        if (r == 0 && i == 0)
                                /*
                                 * The very first interrupt of range 0 is
                                 * always the PSL interrupt, but we only
                                 * need to connect a handler for guests,
                                 * because there's one PSL interrupt per
                                 * context.
                                 * On bare-metal, the PSL interrupt is
                                 * multiplexed and was set up when the AFU
                                 * was configured.
                                 */
                                handler = cxl_ops->psl_interrupt;
                        else
                                handler = cxl_irq_afu;
                        cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
                                    irq_name->name);
                        irq_name = list_next_entry(irq_name, list);
                }
        }
}

int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
        int rc;

        rc = afu_allocate_irqs(ctx, count);
        if (rc)
                return rc;

        afu_register_hwirqs(ctx);
        return 0;
}

void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
        irq_hw_number_t hwirq;
        unsigned int virq;
        int r, i;

        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        virq = irq_find_mapping(NULL, hwirq);
                        if (virq)
                                cxl_unmap_irq(virq, cookie);
                }
        }

        afu_irq_name_free(ctx);
        cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

        ctx->irq_count = 0;
}
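/*
 * Sketch of the expected lifecycle from a caller's point of view (assuming
 * a valid cxl_context; this usage example is not part of the original file):
 *
 *         rc = afu_register_irqs(ctx, count);     // allocate, name, map
 *         if (rc)
 *                 return rc;
 *         ...
 *         afu_release_irqs(ctx, ctx);             // unmap and free it all
 *
 * The cookie passed to afu_release_irqs() must match the one the handlers
 * were registered with, which afu_register_hwirqs() sets to ctx.
 */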
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
{
        dev_crit(&afu->dev,
                 "PSL Slice error received. Check AFU for root cause.\n");
        dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
        if (serr & CXL_PSL_SERR_An_afuto)
                dev_crit(&afu->dev, "AFU MMIO Timeout\n");
        if (serr & CXL_PSL_SERR_An_afudis)
                dev_crit(&afu->dev,
                         "MMIO targeted Accelerator that was not enabled\n");
        if (serr & CXL_PSL_SERR_An_afuov)
                dev_crit(&afu->dev, "AFU CTAG Overflow\n");
        if (serr & CXL_PSL_SERR_An_badsrc)
                dev_crit(&afu->dev, "Bad Interrupt Source\n");
        if (serr & CXL_PSL_SERR_An_badctx)
                dev_crit(&afu->dev, "Bad Context Handle\n");
        if (serr & CXL_PSL_SERR_An_llcmdis)
                dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
        if (serr & CXL_PSL_SERR_An_llcmdto)
                dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
        if (serr & CXL_PSL_SERR_An_afupar)
                dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
        if (serr & CXL_PSL_SERR_An_afudup)
                dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
        if (serr & CXL_PSL_SERR_An_AE)
                dev_crit(&afu->dev,
                         "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
}