/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	bool make_dirty = umem->writable && dirty;
	struct scatterlist *sg;
	unsigned int i;

	if (dirty)
		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
					   DMA_BIDIRECTIONAL, 0);

	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

	sg_free_append_table(&umem->sgt_append);
}

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that supports multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes not supported by
 * the driver to be mapped. Drivers always supporting PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	umem->iova = va = virt;

	if (umem->is_odp) {
		unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

		/* ODP must always be self consistent. */
		if (!(pgsz_bitmap & page_size))
			return 0;
		return page_size;
	}

	/* rdma_for_each_block() has a bug if the page size is smaller than the
	 * page size used to build the umem. For now prevent smaller page sizes
	 * from being returned.
	 */
	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);

	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
	 * work based on VA address bits that don't change.
	 */
	mask = pgsz_bitmap &
	       GENMASK(BITS_PER_LONG - 1,
		       bits_per((umem->length - 1 + virt) ^ virt));
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		va += sg_dma_len(sg) - pgoff;
		/* Except for the last entry, the ending iova alignment sets
		 * the maximum possible page size as the low bits of the iova
		 * must be zero when starting the next chunk.
		 */
		if (i != (umem->sgt_append.sgt.nents - 1))
			mask |= va;
		pgoff = 0;
	}

	/* The mask accumulates 1's in each position where the VA and physical
	 * address differ, thus the length of trailing 0 is the largest page
	 * size that can pass the VA through to the physical.
	 */
	if (mask)
		pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 *
 * An illustrative usage sketch combining ib_umem_get(),
 * ib_umem_find_best_pgsz() and ib_umem_release() appears at the end of
 * this file.
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int pinned, ret;
	unsigned int gup_flags = FOLL_LONGTERM;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev = device;
	umem->length = size;
	umem->address = addr;
	/*
	 * Drivers should call ib_umem_find_best_pgsz() to set the iova
	 * correctly.
	 */
	umem->iova = addr;
	umem->writable = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (umem->writable)
		gup_flags |= FOLL_WRITE;

	while (npages) {
		cond_resched();
		pinned = pin_user_pages_fast(cur_base,
					     min_t(unsigned long, npages,
						   PAGE_SIZE /
						   sizeof(struct page *)),
					     gup_flags, page_list);
		if (pinned < 0) {
			ret = pinned;
			goto umem_release;
		}

		cur_base += pinned * PAGE_SIZE;
		npages -= pinned;
		ret = sg_alloc_append_table_from_pages(
			&umem->sgt_append, page_list, pinned, 0,
			pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
			npages, GFP_KERNEL);
		if (ret) {
			unpin_user_pages_dirty_lock(page_list, pinned, 0);
			goto umem_release;
		}
	}

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
				       DMA_BIDIRECTIONAL, dma_attr);
	if (ret)
		goto umem_release;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_dmabuf)
		return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
		       __func__, offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
				 umem->sgt_append.sgt.orig_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
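
/*
 * Illustrative usage sketch (kept in a comment so it is not compiled): a
 * minimal driver-side flow that pins user memory with ib_umem_get(),
 * selects a HW page size with ib_umem_find_best_pgsz(), and drops the pin
 * with ib_umem_release(). The function name example_pin_and_size() and the
 * page-size bitmap (4K | 64K | 2M) are hypothetical and only demonstrate
 * the call sequence; a real driver derives the bitmap from its device
 * capabilities.
 *
 *	static struct ib_umem *example_pin_and_size(struct ib_device *dev,
 *						     u64 start, u64 length,
 *						     u64 iova, int access,
 *						     unsigned long *page_size)
 *	{
 *		struct ib_umem *umem;
 *
 *		// Pin and DMA map the user range; returns ERR_PTR() on failure.
 *		umem = ib_umem_get(dev, start, length, access);
 *		if (IS_ERR(umem))
 *			return umem;
 *
 *		// Pick the largest HW page size usable for the whole range.
 *		*page_size = ib_umem_find_best_pgsz(umem,
 *						    SZ_4K | SZ_64K | SZ_2M,
 *						    iova);
 *		if (!*page_size) {
 *			// No supported page size fits this mapping.
 *			ib_umem_release(umem);
 *			return ERR_PTR(-EINVAL);
 *		}
 *		return umem;
 *	}
 */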