/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

/*
 * Return the first interrupt range that holds AFU interrupts.
 * On bare-metal (hypervisor mode), range 0 contains only the multiplexed
 * PSL interrupt, so AFU interrupt scanning starts at range 1.
 * In a guest, AFU interrupts start at range 0.
 */
static int afu_irq_range_start(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 1;
	return 0;
}

/*
 * Stash the fault state in the context and defer resolution to the
 * context's fault work queue. Fault handling needs the task's mm, which
 * cannot safely be taken from IRQ context (see comments in cxl_irq()).
 */
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
	ctx->dsisr = dsisr;
	ctx->dar = dar;
	schedule_work(&ctx->fault_work);
	return IRQ_HANDLED;
}

/*
 * Main PSL interrupt decode for a context: inspects the DSISR bits from
 * irq_info, defers segment misses and data misses to process context,
 * forwards slice errors to the backend, and latches AFU errors so a
 * waiter on ctx->wq can pick them up. Always claims the interrupt.
 */
irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL_DSISR_An_DS) {
		/*
		 * We don't inherently need to sleep to handle this, but we do
		 * need to get a ref to the task's mm, which we can't do from
		 * irq context without the potential for a deadlock since it
		 * takes the task_lock. An alternate option would be to keep a
		 * reference to the task's mm the entire time it has cxl open,
		 * but to do that we need to solve the issue where we hold a
		 * ref to the mm, but the mm can hold a ref to the fd after an
		 * mmap preventing anything from being cleaned up.
		 */
		pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	/* The following bits are informational only at this level */
	if (dsisr & CXL_PSL_DSISR_An_M)
		pr_devel("CXL interrupt: PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_P)
		pr_devel("CXL interrupt: Storage protection violation\n");
	if (dsisr & CXL_PSL_DSISR_An_A)
		pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
	if (dsisr & CXL_PSL_DSISR_An_S)
		pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
	if (dsisr & CXL_PSL_DSISR_An_K)
		pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

	if (dsisr & CXL_PSL_DSISR_An_DM) {
		/*
		 * In some cases we might be able to handle the fault
		 * immediately if hash_page would succeed, but we still need
		 * the task's mm, which as above we can't get without a lock
		 */
		pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}
	if (dsisr & CXL_PSL_DSISR_An_ST)
		WARN(1, "CXL interrupt: Segment Table PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_UR)
		pr_devel("CXL interrupt: AURP PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_PE)
		/* Slice error: let the backend (native/guest) handle it */
		return cxl_ops->handle_psl_slice_error(ctx, dsisr,
						irq_info->errstat);
	if (dsisr & CXL_PSL_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
					    "undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			/* Latch the error under ctx->lock and wake any reader */
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = 1;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}

/*
 * Handler for per-context AFU interrupts: translates the hardware irq
 * number into the context-relative AFU interrupt number, records it in
 * the context's pending-irq bitmap and wakes up userspace waiters.
 */
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 0;
	__u16 range;
	int r;

	/*
	 * Look for the interrupt number.
	 * On bare-metal, we know range 0 only contains the PSL
	 * interrupt so we could start counting at range 1 and initialize
	 * afu_irq at 1.
	 * In a guest, range 0 also contains AFU interrupts, so it must
	 * be counted for. Therefore we initialize afu_irq at 0 to take into
	 * account the PSL interrupt.
	 *
	 * For code-readability, it just seems easier to go over all
	 * the ranges on bare-metal and guest. The end result is the same.
	 */
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
	       afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	/* afu_irq is 1-based (1 == PSL slot); the bitmap is 0-based */
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}

/*
 * Map a hardware interrupt to a virtual one, run the backend's setup
 * hook (if any), and install @handler on it.
 * Returns the virq on success, 0 on failure.
 * NOTE(review): on request_irq() failure the virq mapping created above
 * is not disposed here — presumably cleaned up elsewhere; verify.
 */
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
			 irq_handler_t handler, void *cookie, const char *name)
{
	unsigned int virq;
	int result;

	/* IRQ Domain? */
	virq = irq_create_mapping(NULL, hwirq);
	if (!virq) {
		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
		return 0;
	}

	if (cxl_ops->setup_irq)
		cxl_ops->setup_irq(adapter, hwirq, virq);

	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

	result = request_irq(virq, handler, 0, name, cookie);
	if (result) {
		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
		return 0;
	}

	return virq;
}

/* Counterpart of cxl_map_irq(): release the handler installed on @virq */
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
	free_irq(virq, cookie);
}

/*
 * Allocate one hardware interrupt from the backend, map it and install
 * @handler. On success, returns 0 and stores the hw/virtual interrupt
 * numbers through @dest_hwirq/@dest_virq. Returns a negative errno on
 * failure (-ENOMEM if the mapping step failed).
 */
int cxl_register_one_irq(struct cxl *adapter,
			irq_handler_t handler,
			void *cookie,
			irq_hw_number_t *dest_hwirq,
			unsigned int *dest_virq,
			const char *name)
{
	int hwirq, virq;

	if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
		return hwirq;

	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
		goto err;

	*dest_hwirq = hwirq;
	*dest_virq = virq;

	return 0;

err:
	cxl_ops->release_one_irq(adapter, hwirq);
	return -ENOMEM;
}

/* Free every interrupt name allocated for @ctx by afu_allocate_irqs() */
void afu_irq_name_free(struct cxl_context *ctx)
{
	struct cxl_irq_name *irq_name, *tmp;

	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
		kfree(irq_name->name);
		list_del(&irq_name->list);
		kfree(irq_name);
	}
}

/*
 * Reserve @count AFU interrupts for @ctx from the backend and allocate
 * the per-interrupt bookkeeping (pending bitmap and request_irq names).
 * No handlers are installed here — see afu_register_hwirqs().
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): on the error path ctx->irq_bitmap is not freed here —
 * presumably released with the context; verify against context teardown.
 */
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
	int rc, r, i, j = 1;
	struct cxl_irq_name *irq_name;
	int alloc_count;

	/*
	 * In native mode, range 0 is reserved for the multiplexed
	 * PSL interrupt. It has been allocated when the AFU was initialized.
	 *
	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
	 * and is the first interrupt from range 0. It still needs to be
	 * allocated, so bump the count by one.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		alloc_count = count;
	else
		alloc_count = count + 1;

	if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
							alloc_count)))
		return rc;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Multiplexed PSL Interrupt */
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->irq_count = count;
	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
	if (!ctx->irq_bitmap)
		goto out;

	/*
	 * Allocate names first. If any fail, bail out before allocating
	 * actual hardware IRQs.
	 */
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			irq_name = kmalloc(sizeof(struct cxl_irq_name),
					   GFP_KERNEL);
			if (!irq_name)
				goto out;
			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
						   dev_name(&ctx->afu->dev),
						   ctx->pe, j);
			if (!irq_name->name) {
				kfree(irq_name);
				goto out;
			}
			/* Add to tail so the next lookup gets the correct order */
			list_add_tail(&irq_name->list, &ctx->irq_names);
			j++;
		}
	}
	return 0;

out:
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
	afu_irq_name_free(ctx);
	return -ENOMEM;
}

/*
 * Install handlers on every interrupt previously reserved by
 * afu_allocate_irqs(), consuming the pre-allocated names in order.
 */
static void afu_register_hwirqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	struct cxl_irq_name *irq_name;
	int r, i;
	irqreturn_t (*handler)(int irq, void *data);

	/* We've allocated all memory now, so let's do the irq allocations */
	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			if (r == 0 && i == 0)
				/*
				 * The very first interrupt of range 0 is
				 * always the PSL interrupt, but we only
				 * need to connect a handler for guests,
				 * because there's one PSL interrupt per
				 * context.
				 * On bare-metal, the PSL interrupt is
				 * multiplexed and was setup when the AFU
				 * was configured.
				 */
				handler = cxl_ops->psl_interrupt;
			else
				handler = cxl_irq_afu;
			cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
				    irq_name->name);
			irq_name = list_next_entry(irq_name, list);
		}
	}
}

/*
 * Allocate and wire up @count AFU interrupts for @ctx.
 * Returns 0 on success, negative errno if allocation failed.
 */
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
	int rc;

	rc = afu_allocate_irqs(ctx, count);
	if (rc)
		return rc;

	afu_register_hwirqs(ctx);
	return 0;
}

/*
 * Undo afu_register_irqs(): unmap every installed handler, free the
 * interrupt names and return the interrupt ranges to the backend.
 */
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, cookie);
		}
	}

	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

	ctx->irq_count = 0;
}

/*
 * Decode a PSL slice error register (PSL_SERR_An) value and log one
 * critical message per error bit set in @serr.
 */
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
{
	dev_crit(&afu->dev,
		 "PSL Slice error received. Check AFU for root cause.\n");
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	if (serr & CXL_PSL_SERR_An_afuto)
		dev_crit(&afu->dev, "AFU MMIO Timeout\n");
	if (serr & CXL_PSL_SERR_An_afudis)
		dev_crit(&afu->dev,
			 "MMIO targeted Accelerator that was not enabled\n");
	if (serr & CXL_PSL_SERR_An_afuov)
		dev_crit(&afu->dev, "AFU CTAG Overflow\n");
	if (serr & CXL_PSL_SERR_An_badsrc)
		dev_crit(&afu->dev, "Bad Interrupt Source\n");
	if (serr & CXL_PSL_SERR_An_badctx)
		dev_crit(&afu->dev, "Bad Context Handle\n");
	if (serr & CXL_PSL_SERR_An_llcmdis)
		dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
	if (serr & CXL_PSL_SERR_An_llcmdto)
		dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
	if (serr & CXL_PSL_SERR_An_afupar)
		dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
	if (serr & CXL_PSL_SERR_An_afudup)
		dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
	if (serr & CXL_PSL_SERR_An_AE)
		dev_crit(&afu->dev,
			 "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
}