// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;
	struct ocxl_context *ctx;

	ctx = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		/* don't leak the context if we couldn't get a pasid */
		kfree(ctx);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	ctx->tidr = 0;

	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);

	/* only hand the context back to the caller once fully initialized */
	*context = ctx;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);

/*
 * Callback for when a translation fault triggers an error
 * data: a pointer to the context which triggered the fault
 * addr: the address that triggered the error
 * dsisr: the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = (struct ocxl_context *) data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}

int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
	int rc;

	// Locks both status & tidr
	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
			mm->context.id, ctx->tidr, amr, mm,
			xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);

static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;
	int irq_id = ocxl_irq_offset_to_id(ctx, offset);

	trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}

static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;
	vm_fault_t ret;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
		pasid_off * ctx->afu->config.pp_mmio_stride +
		offset;

	ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return ret;
}

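/*
 * The mmap offset space handled below is split in two: offsets under
 * afu->irq_base_offset fall within the context's per-PASID MMIO window
 * (one pp_mmio_stride-sized slice per PASID), while offsets at or above
 * it select AFU IRQ trigger pages, translated to an IRQ id by
 * ocxl_irq_offset_to_id().
 */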
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		ret = map_afu_irq(vma, vmf->address, offset, ctx);
	return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};

static int check_mmap_afu_irq(struct ocxl_context *ctx,
		struct vm_area_struct *vma)
{
	int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);

	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, irq_id))
		return -EINVAL;

	/*
	 * trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmapped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
	return 0;
}

static int check_mmap_mmio(struct ocxl_context *ctx,
		struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}

int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}

int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we time out waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may still be
		 * referenced by the AFU later, in which case we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE defined. The
		 * caller shouldn't free the context, so that the
		 * PASID remains allocated.
		 *
		 * A link reset will be required to clean up the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&dev->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);

void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);
	/* reference to the AFU taken in ocxl_context_alloc() */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);
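
/*
 * Illustrative sketch (not compiled, not part of the driver): a minimal view
 * of how a frontend might drive the context lifecycle exported above. The
 * helper name ocxl_example_run_job() and the way the afu/mapping/mm/amr
 * values are obtained are assumptions for illustration only; the
 * ocxl_context_* calls are the ones defined in this file.
 */
#if 0
static int ocxl_example_run_job(struct ocxl_afu *afu,
		struct address_space *mapping,
		struct mm_struct *mm, u64 amr)
{
	struct ocxl_context *ctx;
	int rc;

	/* allocate a context and reserve a PASID on the AFU */
	rc = ocxl_context_alloc(&ctx, afu, mapping);
	if (rc)
		return rc;

	/* add the Process Element so the AFU can translate through this mm */
	rc = ocxl_context_attach(ctx, amr, mm);
	if (rc)
		goto out_free;

	/*
	 * AFU work happens here; userspace mmap() of the per-PASID MMIO
	 * window and of the IRQ trigger pages is served by
	 * ocxl_context_mmap().
	 */

	/* ask the AFU to terminate the PASID, then drop the Process Element */
	rc = ocxl_context_detach(ctx);
	if (rc == -EBUSY)
		return rc;	/* keep the context, see ocxl_context_detach() */

out_free:
	ocxl_context_free(ctx);
	return rc;
}
#endif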