// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;
	struct ocxl_context *ctx;

	*context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
	if (!*context)
		return -ENOMEM;

	ctx = *context;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	ctx->tidr = 0;

	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);

/*
 * Callback for when a translation fault triggers an error
 * data: a pointer to the context which triggered the fault
 * addr: the address that triggered the error
 * dsisr: the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = (struct ocxl_context *) data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}

int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
	int rc;

	// Locks both status & tidr
	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
			mm->context.id, ctx->tidr, amr, mm,
			xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);

static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;

	trigger_addr = ocxl_afu_irq_get_addr(ctx, offset);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}

static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;
	vm_fault_t ret;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
		pasid_off * ctx->afu->config.pp_mmio_stride +
		offset;

	ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return ret;
}
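
/*
 * The fault handler below splits a context's mmap range in two: offsets
 * below afu->irq_base_offset resolve to the per-PASID MMIO area, offsets
 * at or above it resolve to AFU IRQ trigger pages. A hedged, illustrative
 * userspace sketch of that layout (not part of this file; "fd" is an open
 * context file descriptor and "irq_offset" an offset handed out by the
 * driver's IRQ allocation ioctl):
 *
 *	// Per-PASID MMIO: below the IRQ base, readable and writable.
 *	void *mmio = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	// IRQ trigger page: write-only, as check_mmap_afu_irq() below
 *	// rejects VM_READ and VM_EXEC mappings.
 *	void *trig = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *			fd, irq_offset);
 */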

static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		ret = map_afu_irq(vma, vmf->address, offset, ctx);
	return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};

static int check_mmap_afu_irq(struct ocxl_context *ctx,
		struct vm_area_struct *vma)
{
	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, vma->vm_pgoff << PAGE_SHIFT))
		return -EINVAL;

	/*
	 * The trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmapped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
	return 0;
}

static int check_mmap_mmio(struct ocxl_context *ctx,
		struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}

int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}
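
/*
 * A minimal sketch of how a file_operations ->mmap handler might forward
 * to ocxl_context_mmap(); illustrative only, "afu_mmap_example" is a
 * hypothetical name. The context must live in file->private_data, since
 * ocxl_mmap_fault() above reads it back through vma->vm_file->private_data:
 *
 *	static int afu_mmap_example(struct file *file,
 *			struct vm_area_struct *vma)
 *	{
 *		struct ocxl_context *ctx = file->private_data;
 *
 *		return ocxl_context_mmap(ctx, vma);
 *	}
 */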

int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we time out waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may be referenced
		 * in the future by the AFU. In that case, we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE defined. The
		 * caller shouldn't free the context, so that the
		 * PASID remains allocated.
		 *
		 * A link reset will be required to clean up the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&dev->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);

void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);
	/* reference to the AFU taken in ocxl_context_alloc() */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);
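
/*
 * Illustrative lifecycle for the API in this file (a hedged sketch, not
 * driver code; "afu", "mapping" and "amr" are assumed to come from the
 * caller):
 *
 *	struct ocxl_context *ctx;
 *	int rc;
 *
 *	rc = ocxl_context_alloc(&ctx, afu, mapping);
 *	if (rc)
 *		return rc;
 *	rc = ocxl_context_attach(ctx, amr, current->mm);
 *	...
 *	rc = ocxl_context_detach(ctx);
 *	if (rc != -EBUSY)
 *		ocxl_context_free(ctx); // on -EBUSY the PASID must stay allocated
 */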