/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->npages,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
		page = sg_page(sg);
		if (!PageDirty(page) && umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * If access flags indicate ODP memory, avoid pinning.  Instead, store
 * the mm for future page fault handling in conjunction with MMU
 * notifiers.
 *
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int i;
	unsigned long dma_attrs = 0;
	struct scatterlist *sg, *sg_list_start;
	int need_release = 0;
	unsigned int gup_flags = FOLL_WRITE;

	if (dmasync)
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
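	 * For example, addr = ULONG_MAX - PAGE_SIZE + 1 with
	 * size = 2 * PAGE_SIZE makes addr + size wrap to a small value,
	 * which the first check below catches; the PAGE_ALIGN() check
	 * catches the case where only rounding the end address up to a
	 * page boundary wraps.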
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->pid        = get_task_pid(current, PIDTYPE_PID);
	/*
	 * We ask for writable memory if any of the following
	 * access flags are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable   = !!(access &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));

	if (access & IB_ACCESS_ON_DEMAND) {
		put_pid(umem->pid);
		ret = ib_umem_odp_get(context, umem, access);
		if (ret) {
			kfree(umem);
			return ERR_PTR(ret);
		}
		return umem;
	}

	umem->odp_data = NULL;

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb = 1;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		put_pid(umem->pid);
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If we can't allocate the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory.
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = ib_umem_num_pages(umem);

	down_write(&current->mm->mmap_sem);

	locked     = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto out;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	need_release = 1;
	sg_list_start = umem->sg_head.sgl;

	while (npages) {
		ret = get_user_pages_longterm(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags, page_list, vma_list);

		if (ret < 0)
			goto out;

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		for_each_sg(sg_list_start, sg, ret, i) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;

			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

		/* preparing for next loop */
		sg_list_start = sg;
	}

	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->npages,
					 DMA_BIDIRECTIONAL,
					 dma_attrs);

	if (umem->nmap <= 0) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;

out:
	if (ret < 0) {
		if (need_release)
			__ib_umem_release(context->device, umem, 0);
		put_pid(umem->pid);
		kfree(umem);
	} else
		current->mm->pinned_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
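/*
 * Example (illustrative sketch only, not called anywhere in this file):
 * a driver that needs a user buffer pinned and DMA mapped for local
 * writes might use ib_umem_get()/ib_umem_release() roughly as follows.
 * The enclosing function and its arguments are hypothetical.
 *
 *	static int example_pin_user_buffer(struct ib_ucontext *context,
 *					   unsigned long user_addr,
 *					   size_t user_len)
 *	{
 *		struct ib_umem *umem;
 *
 *		umem = ib_umem_get(context, user_addr, user_len,
 *				   IB_ACCESS_LOCAL_WRITE, 0);
 *		if (IS_ERR(umem))
 *			return PTR_ERR(umem);
 *
 *		... program the HCA from umem->sg_head.sgl / umem->nmap ...
 *
 *		ib_umem_release(umem);
 *		return 0;
 *	}
 */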
static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->pinned_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	struct task_struct *task;
	unsigned long diff;

	if (umem->odp_data) {
		ib_umem_odp_release(umem);
		return;
	}

	__ib_umem_release(umem->context->device, umem, 1);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	diff = ib_umem_num_pages(umem);

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int i;
	int n;
	struct scatterlist *sg;

	if (umem->odp_data)
		return ib_umem_num_pages(umem);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> umem->page_shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
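/*
 * Example (illustrative sketch only): reading back the first bytes of a
 * pinned region, e.g. to parse a header that userspace placed there.
 * The buffer size and variable names below are hypothetical.
 *
 *	char hdr[64];
 *	int err;
 *
 *	err = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */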