// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

struct ocxl_context *ocxl_context_alloc(void)
{
	return kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
}

int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	return 0;
}

/*
 * Callback for when a translation fault triggers an error
 * data:	a pointer to the context which triggered the fault
 * addr:	the address that triggered the error
 * dsisr:	the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = (struct ocxl_context *) data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}

int ocxl_context_attach(struct ocxl_context *ctx, u64 amr)
{
	int rc;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
			current->mm->context.id, 0, amr, current->mm,
			xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}

static int map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;

	trigger_addr = ocxl_afu_irq_get_addr(ctx, offset);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	vm_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
		pasid_off * ctx->afu->config.pp_mmio_stride +
		offset;

	vm_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return VM_FAULT_NOPAGE;
}
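
/*
 * Fault handler for a context's mmap region: offsets below the AFU's
 * irq_base_offset fall in the per-process MMIO area, higher offsets
 * map AFU interrupt trigger pages.
 */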
static int ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	int rc;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		rc = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		rc = map_afu_irq(vma, vmf->address, offset, ctx);
	return rc;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};

static int check_mmap_afu_irq(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, vma->vm_pgoff << PAGE_SHIFT))
		return -EINVAL;

	/*
	 * trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmapped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
	return 0;
}

static int check_mmap_mmio(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}

int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}

int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we timeout waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may be referenced
		 * in the future by the AFU. In which case, we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE
		 * defined. Caller shouldn't free the context so that
		 * PASID remains allocated.
		 *
		 * A link reset will be required to cleanup the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&ctx->afu->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}
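
/*
 * Force-detach every context still registered with the AFU and tear
 * down any active mmio mappings, so userspace can no longer reach the
 * card.
 */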
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);
	/* reference to the AFU taken in ocxl_context_init */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}