// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include "ocxl_internal.h"
#include "trace.h"

struct afu_irq {
	int id;
	int hw_irq;
	unsigned int virq;
	char *name;
	u64 trigger_page;
	struct eventfd_ctx *ev_ctx;
};

/* Convert between an AFU irq ID and its page-aligned offset in the mmap area */
static int irq_offset_to_id(struct ocxl_context *ctx, u64 offset)
{
	return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT;
}

static u64 irq_id_to_offset(struct ocxl_context *ctx, int id)
{
	return ctx->afu->irq_base_offset + (id << PAGE_SHIFT);
}

/* Notify user space through the eventfd, if one has been registered */
static irqreturn_t afu_irq_handler(int virq, void *data)
{
	struct afu_irq *irq = (struct afu_irq *) data;

	trace_ocxl_afu_irq_receive(virq);
	if (irq->ev_ctx)
		eventfd_signal(irq->ev_ctx, 1);
	return IRQ_HANDLED;
}

static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq)
{
	int rc;

	irq->virq = irq_create_mapping(NULL, irq->hw_irq);
	if (!irq->virq) {
		pr_err("irq_create_mapping failed\n");
		return -ENOMEM;
	}
	pr_debug("hw_irq %d mapped to virq %u\n", irq->hw_irq, irq->virq);

	irq->name = kasprintf(GFP_KERNEL, "ocxl-afu-%u", irq->virq);
	if (!irq->name) {
		irq_dispose_mapping(irq->virq);
		return -ENOMEM;
	}

	rc = request_irq(irq->virq, afu_irq_handler, 0, irq->name, irq);
	if (rc) {
		kfree(irq->name);
		irq->name = NULL;
		irq_dispose_mapping(irq->virq);
		pr_err("request_irq failed: %d\n", rc);
		return rc;
	}
	return 0;
}

static void release_afu_irq(struct afu_irq *irq)
{
	free_irq(irq->virq, irq);
	irq_dispose_mapping(irq->virq);
	kfree(irq->name);
}

int ocxl_afu_irq_alloc(struct ocxl_context *ctx, u64 *irq_offset)
{
	struct afu_irq *irq;
	int rc;

	irq = kzalloc(sizeof(struct afu_irq), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	/*
	 * We limit the number of afu irqs per context and per link to
	 * avoid a single process or user depleting the pool of IPIs
	 */

	mutex_lock(&ctx->irq_lock);

	irq->id = idr_alloc(&ctx->irq_idr, irq, 0, MAX_IRQ_PER_CONTEXT,
			GFP_KERNEL);
	if (irq->id < 0) {
		rc = -ENOSPC;
		goto err_unlock;
	}

	rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq,
				&irq->trigger_page);
	if (rc)
		goto err_idr;

	rc = setup_afu_irq(ctx, irq);
	if (rc)
		goto err_alloc;

	*irq_offset = irq_id_to_offset(ctx, irq->id);

	trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq,
				*irq_offset);
	mutex_unlock(&ctx->irq_lock);
	return 0;

err_alloc:
	ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
err_idr:
	idr_remove(&ctx->irq_idr, irq->id);
err_unlock:
	mutex_unlock(&ctx->irq_lock);
	kfree(irq);
	return rc;
}

/* Caller must hold ctx->irq_lock */
static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx)
{
	trace_ocxl_afu_irq_free(ctx->pasid, irq->id);
	if (ctx->mapping)
		unmap_mapping_range(ctx->mapping,
				irq_id_to_offset(ctx, irq->id),
				1 << PAGE_SHIFT, 1);
	release_afu_irq(irq);
	if (irq->ev_ctx)
		eventfd_ctx_put(irq->ev_ctx);
	ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
	kfree(irq);
}

int ocxl_afu_irq_free(struct ocxl_context *ctx, u64 irq_offset)
{
	struct afu_irq *irq;
	int id = irq_offset_to_id(ctx, irq_offset);

	mutex_lock(&ctx->irq_lock);

	irq = idr_find(&ctx->irq_idr, id);
	if (!irq) {
		mutex_unlock(&ctx->irq_lock);
		return -EINVAL;
	}
	idr_remove(&ctx->irq_idr, irq->id);
	afu_irq_free(irq, ctx);
	mutex_unlock(&ctx->irq_lock);
	return 0;
}

void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
{
	struct afu_irq *irq;
	int id;

	mutex_lock(&ctx->irq_lock);
	idr_for_each_entry(&ctx->irq_idr, irq, id)
		afu_irq_free(irq, ctx);
	mutex_unlock(&ctx->irq_lock);
}

/* Associate an eventfd with an AFU irq, so user space can be notified */
int ocxl_afu_irq_set_fd(struct ocxl_context *ctx, u64 irq_offset, int eventfd)
{
	struct afu_irq *irq;
	struct eventfd_ctx *ev_ctx;
	int rc = 0, id = irq_offset_to_id(ctx, irq_offset);

	mutex_lock(&ctx->irq_lock);
	irq = idr_find(&ctx->irq_idr, id);
	if (!irq) {
		rc = -EINVAL;
		goto unlock;
	}

	ev_ctx = eventfd_ctx_fdget(eventfd);
	if (IS_ERR(ev_ctx)) {
		rc = -EINVAL;
		goto unlock;
	}

	irq->ev_ctx = ev_ctx;
unlock:
	mutex_unlock(&ctx->irq_lock);
	return rc;
}

/* Return the trigger page address for the irq at irq_offset, or 0 if not found */
u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, u64 irq_offset)
{
	struct afu_irq *irq;
	int id = irq_offset_to_id(ctx, irq_offset);
	u64 addr = 0;

	mutex_lock(&ctx->irq_lock);
	irq = idr_find(&ctx->irq_idr, id);
	if (irq)
		addr = irq->trigger_page;
	mutex_unlock(&ctx->irq_lock);
	return addr;
}