1 /* 2 * Copyright (c) 2014 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/types.h> 34 #include <linux/sched.h> 35 #include <linux/sched/mm.h> 36 #include <linux/sched/task.h> 37 #include <linux/pid.h> 38 #include <linux/slab.h> 39 #include <linux/export.h> 40 #include <linux/vmalloc.h> 41 #include <linux/hugetlb.h> 42 #include <linux/interval_tree.h> 43 #include <linux/pagemap.h> 44 45 #include <rdma/ib_verbs.h> 46 #include <rdma/ib_umem.h> 47 #include <rdma/ib_umem_odp.h> 48 49 #include "uverbs.h" 50 51 static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp, 52 const struct mmu_interval_notifier_ops *ops) 53 { 54 int ret; 55 56 umem_odp->umem.is_odp = 1; 57 mutex_init(&umem_odp->umem_mutex); 58 59 if (!umem_odp->is_implicit_odp) { 60 size_t page_size = 1UL << umem_odp->page_shift; 61 unsigned long start; 62 unsigned long end; 63 size_t pages; 64 65 start = ALIGN_DOWN(umem_odp->umem.address, page_size); 66 if (check_add_overflow(umem_odp->umem.address, 67 (unsigned long)umem_odp->umem.length, 68 &end)) 69 return -EOVERFLOW; 70 end = ALIGN(end, page_size); 71 if (unlikely(end < page_size)) 72 return -EOVERFLOW; 73 74 pages = (end - start) >> umem_odp->page_shift; 75 if (!pages) 76 return -EINVAL; 77 78 umem_odp->page_list = kvcalloc( 79 pages, sizeof(*umem_odp->page_list), GFP_KERNEL); 80 if (!umem_odp->page_list) 81 return -ENOMEM; 82 83 umem_odp->dma_list = kvcalloc( 84 pages, sizeof(*umem_odp->dma_list), GFP_KERNEL); 85 if (!umem_odp->dma_list) { 86 ret = -ENOMEM; 87 goto out_page_list; 88 } 89 90 ret = mmu_interval_notifier_insert(&umem_odp->notifier, 91 umem_odp->umem.owning_mm, 92 start, end - start, ops); 93 if (ret) 94 goto out_dma_list; 95 } 96 97 return 0; 98 99 out_dma_list: 100 kvfree(umem_odp->dma_list); 101 out_page_list: 102 kvfree(umem_odp->page_list); 103 return ret; 104 } 105 106 /** 107 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem 108 * 109 * Implicit ODP umems do not have a VA range and do not have any page lists. 110 * They exist only to hold the per_mm reference to help the driver create 111 * children umems. 112 * 113 * @device: IB device to create UMEM 114 * @access: ib_reg_mr access flags 115 */ 116 struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, 117 int access) 118 { 119 struct ib_umem *umem; 120 struct ib_umem_odp *umem_odp; 121 int ret; 122 123 if (access & IB_ACCESS_HUGETLB) 124 return ERR_PTR(-EINVAL); 125 126 umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL); 127 if (!umem_odp) 128 return ERR_PTR(-ENOMEM); 129 umem = &umem_odp->umem; 130 umem->ibdev = device; 131 umem->writable = ib_access_writable(access); 132 umem->owning_mm = current->mm; 133 umem_odp->is_implicit_odp = 1; 134 umem_odp->page_shift = PAGE_SHIFT; 135 136 umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID); 137 ret = ib_init_umem_odp(umem_odp, NULL); 138 if (ret) { 139 put_pid(umem_odp->tgid); 140 kfree(umem_odp); 141 return ERR_PTR(ret); 142 } 143 return umem_odp; 144 } 145 EXPORT_SYMBOL(ib_umem_odp_alloc_implicit); 146 147 /** 148 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit 149 * parent ODP umem 150 * 151 * @root: The parent umem enclosing the child. This must be allocated using 152 * ib_alloc_implicit_odp_umem() 153 * @addr: The starting userspace VA 154 * @size: The length of the userspace VA 155 */ 156 struct ib_umem_odp * 157 ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr, 158 size_t size, 159 const struct mmu_interval_notifier_ops *ops) 160 { 161 /* 162 * Caller must ensure that root cannot be freed during the call to 163 * ib_alloc_odp_umem. 164 */ 165 struct ib_umem_odp *odp_data; 166 struct ib_umem *umem; 167 int ret; 168 169 if (WARN_ON(!root->is_implicit_odp)) 170 return ERR_PTR(-EINVAL); 171 172 odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL); 173 if (!odp_data) 174 return ERR_PTR(-ENOMEM); 175 umem = &odp_data->umem; 176 umem->ibdev = root->umem.ibdev; 177 umem->length = size; 178 umem->address = addr; 179 umem->writable = root->umem.writable; 180 umem->owning_mm = root->umem.owning_mm; 181 odp_data->page_shift = PAGE_SHIFT; 182 odp_data->notifier.ops = ops; 183 184 odp_data->tgid = get_pid(root->tgid); 185 ret = ib_init_umem_odp(odp_data, ops); 186 if (ret) { 187 put_pid(odp_data->tgid); 188 kfree(odp_data); 189 return ERR_PTR(ret); 190 } 191 return odp_data; 192 } 193 EXPORT_SYMBOL(ib_umem_odp_alloc_child); 194 195 /** 196 * ib_umem_odp_get - Create a umem_odp for a userspace va 197 * 198 * @device: IB device struct to get UMEM 199 * @addr: userspace virtual address to start at 200 * @size: length of region to pin 201 * @access: IB_ACCESS_xxx flags for memory being pinned 202 * 203 * The driver should use when the access flags indicate ODP memory. It avoids 204 * pinning, instead, stores the mm for future page fault handling in 205 * conjunction with MMU notifiers. 206 */ 207 struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, 208 unsigned long addr, size_t size, int access, 209 const struct mmu_interval_notifier_ops *ops) 210 { 211 struct ib_umem_odp *umem_odp; 212 struct mm_struct *mm; 213 int ret; 214 215 if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND))) 216 return ERR_PTR(-EINVAL); 217 218 umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL); 219 if (!umem_odp) 220 return ERR_PTR(-ENOMEM); 221 222 umem_odp->umem.ibdev = device; 223 umem_odp->umem.length = size; 224 umem_odp->umem.address = addr; 225 umem_odp->umem.writable = ib_access_writable(access); 226 umem_odp->umem.owning_mm = mm = current->mm; 227 umem_odp->notifier.ops = ops; 228 229 umem_odp->page_shift = PAGE_SHIFT; 230 if (access & IB_ACCESS_HUGETLB) { 231 struct vm_area_struct *vma; 232 struct hstate *h; 233 234 down_read(&mm->mmap_sem); 235 vma = find_vma(mm, ib_umem_start(umem_odp)); 236 if (!vma || !is_vm_hugetlb_page(vma)) { 237 up_read(&mm->mmap_sem); 238 ret = -EINVAL; 239 goto err_free; 240 } 241 h = hstate_vma(vma); 242 umem_odp->page_shift = huge_page_shift(h); 243 up_read(&mm->mmap_sem); 244 } 245 246 umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID); 247 ret = ib_init_umem_odp(umem_odp, ops); 248 if (ret) 249 goto err_put_pid; 250 return umem_odp; 251 252 err_put_pid: 253 put_pid(umem_odp->tgid); 254 err_free: 255 kfree(umem_odp); 256 return ERR_PTR(ret); 257 } 258 EXPORT_SYMBOL(ib_umem_odp_get); 259 260 void ib_umem_odp_release(struct ib_umem_odp *umem_odp) 261 { 262 /* 263 * Ensure that no more pages are mapped in the umem. 264 * 265 * It is the driver's responsibility to ensure, before calling us, 266 * that the hardware will not attempt to access the MR any more. 267 */ 268 if (!umem_odp->is_implicit_odp) { 269 mutex_lock(&umem_odp->umem_mutex); 270 ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp), 271 ib_umem_end(umem_odp)); 272 mutex_unlock(&umem_odp->umem_mutex); 273 mmu_interval_notifier_remove(&umem_odp->notifier); 274 kvfree(umem_odp->dma_list); 275 kvfree(umem_odp->page_list); 276 put_pid(umem_odp->tgid); 277 } 278 kfree(umem_odp); 279 } 280 EXPORT_SYMBOL(ib_umem_odp_release); 281 282 /* 283 * Map for DMA and insert a single page into the on-demand paging page tables. 284 * 285 * @umem: the umem to insert the page to. 286 * @page_index: index in the umem to add the page to. 287 * @page: the page struct to map and add. 288 * @access_mask: access permissions needed for this page. 289 * @current_seq: sequence number for synchronization with invalidations. 290 * the sequence number is taken from 291 * umem_odp->notifiers_seq. 292 * 293 * The function returns -EFAULT if the DMA mapping operation fails. It returns 294 * -EAGAIN if a concurrent invalidation prevents us from updating the page. 295 * 296 * The page is released via put_page even if the operation failed. For on-demand 297 * pinning, the page is released whenever it isn't stored in the umem. 298 */ 299 static int ib_umem_odp_map_dma_single_page( 300 struct ib_umem_odp *umem_odp, 301 unsigned int page_index, 302 struct page *page, 303 u64 access_mask, 304 unsigned long current_seq) 305 { 306 struct ib_device *dev = umem_odp->umem.ibdev; 307 dma_addr_t dma_addr; 308 int ret = 0; 309 310 if (mmu_interval_check_retry(&umem_odp->notifier, current_seq)) { 311 ret = -EAGAIN; 312 goto out; 313 } 314 if (!(umem_odp->dma_list[page_index])) { 315 dma_addr = 316 ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift), 317 DMA_BIDIRECTIONAL); 318 if (ib_dma_mapping_error(dev, dma_addr)) { 319 ret = -EFAULT; 320 goto out; 321 } 322 umem_odp->dma_list[page_index] = dma_addr | access_mask; 323 umem_odp->page_list[page_index] = page; 324 umem_odp->npages++; 325 } else if (umem_odp->page_list[page_index] == page) { 326 umem_odp->dma_list[page_index] |= access_mask; 327 } else { 328 /* 329 * This is a race here where we could have done: 330 * 331 * CPU0 CPU1 332 * get_user_pages() 333 * invalidate() 334 * page_fault() 335 * mutex_lock(umem_mutex) 336 * page from GUP != page in ODP 337 * 338 * It should be prevented by the retry test above as reading 339 * the seq number should be reliable under the 340 * umem_mutex. Thus something is really not working right if 341 * things get here. 342 */ 343 WARN(true, 344 "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n", 345 umem_odp->page_list[page_index], page); 346 ret = -EAGAIN; 347 } 348 349 out: 350 put_page(page); 351 return ret; 352 } 353 354 /** 355 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR. 356 * 357 * Pins the range of pages passed in the argument, and maps them to 358 * DMA addresses. The DMA addresses of the mapped pages is updated in 359 * umem_odp->dma_list. 360 * 361 * Returns the number of pages mapped in success, negative error code 362 * for failure. 363 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents 364 * the function from completing its task. 365 * An -ENOENT error code indicates that userspace process is being terminated 366 * and mm was already destroyed. 367 * @umem_odp: the umem to map and pin 368 * @user_virt: the address from which we need to map. 369 * @bcnt: the minimal number of bytes to pin and map. The mapping might be 370 * bigger due to alignment, and may also be smaller in case of an error 371 * pinning or mapping a page. The actual pages mapped is returned in 372 * the return value. 373 * @access_mask: bit mask of the requested access permissions for the given 374 * range. 375 * @current_seq: the MMU notifiers sequance value for synchronization with 376 * invalidations. the sequance number is read from 377 * umem_odp->notifiers_seq before calling this function 378 */ 379 int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt, 380 u64 bcnt, u64 access_mask, 381 unsigned long current_seq) 382 { 383 struct task_struct *owning_process = NULL; 384 struct mm_struct *owning_mm = umem_odp->umem.owning_mm; 385 struct page **local_page_list = NULL; 386 u64 page_mask, off; 387 int j, k, ret = 0, start_idx, npages = 0; 388 unsigned int flags = 0, page_shift; 389 phys_addr_t p = 0; 390 391 if (access_mask == 0) 392 return -EINVAL; 393 394 if (user_virt < ib_umem_start(umem_odp) || 395 user_virt + bcnt > ib_umem_end(umem_odp)) 396 return -EFAULT; 397 398 local_page_list = (struct page **)__get_free_page(GFP_KERNEL); 399 if (!local_page_list) 400 return -ENOMEM; 401 402 page_shift = umem_odp->page_shift; 403 page_mask = ~(BIT(page_shift) - 1); 404 off = user_virt & (~page_mask); 405 user_virt = user_virt & page_mask; 406 bcnt += off; /* Charge for the first page offset as well. */ 407 408 /* 409 * owning_process is allowed to be NULL, this means somehow the mm is 410 * existing beyond the lifetime of the originating process.. Presumably 411 * mmget_not_zero will fail in this case. 412 */ 413 owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID); 414 if (!owning_process || !mmget_not_zero(owning_mm)) { 415 ret = -EINVAL; 416 goto out_put_task; 417 } 418 419 if (access_mask & ODP_WRITE_ALLOWED_BIT) 420 flags |= FOLL_WRITE; 421 422 start_idx = (user_virt - ib_umem_start(umem_odp)) >> page_shift; 423 k = start_idx; 424 425 while (bcnt > 0) { 426 const size_t gup_num_pages = min_t(size_t, 427 (bcnt + BIT(page_shift) - 1) >> page_shift, 428 PAGE_SIZE / sizeof(struct page *)); 429 430 down_read(&owning_mm->mmap_sem); 431 /* 432 * Note: this might result in redundent page getting. We can 433 * avoid this by checking dma_list to be 0 before calling 434 * get_user_pages. However, this make the code much more 435 * complex (and doesn't gain us much performance in most use 436 * cases). 437 */ 438 npages = get_user_pages_remote(owning_process, owning_mm, 439 user_virt, gup_num_pages, 440 flags, local_page_list, NULL, NULL); 441 up_read(&owning_mm->mmap_sem); 442 443 if (npages < 0) { 444 if (npages != -EAGAIN) 445 pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages); 446 else 447 pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages); 448 break; 449 } 450 451 bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); 452 mutex_lock(&umem_odp->umem_mutex); 453 for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) { 454 if (user_virt & ~page_mask) { 455 p += PAGE_SIZE; 456 if (page_to_phys(local_page_list[j]) != p) { 457 ret = -EFAULT; 458 break; 459 } 460 put_page(local_page_list[j]); 461 continue; 462 } 463 464 ret = ib_umem_odp_map_dma_single_page( 465 umem_odp, k, local_page_list[j], 466 access_mask, current_seq); 467 if (ret < 0) { 468 if (ret != -EAGAIN) 469 pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret); 470 else 471 pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret); 472 break; 473 } 474 475 p = page_to_phys(local_page_list[j]); 476 k++; 477 } 478 mutex_unlock(&umem_odp->umem_mutex); 479 480 if (ret < 0) { 481 /* 482 * Release pages, remembering that the first page 483 * to hit an error was already released by 484 * ib_umem_odp_map_dma_single_page(). 485 */ 486 if (npages - (j + 1) > 0) 487 release_pages(&local_page_list[j+1], 488 npages - (j + 1)); 489 break; 490 } 491 } 492 493 if (ret >= 0) { 494 if (npages < 0 && k == start_idx) 495 ret = npages; 496 else 497 ret = k - start_idx; 498 } 499 500 mmput(owning_mm); 501 out_put_task: 502 if (owning_process) 503 put_task_struct(owning_process); 504 free_page((unsigned long)local_page_list); 505 return ret; 506 } 507 EXPORT_SYMBOL(ib_umem_odp_map_dma_pages); 508 509 void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt, 510 u64 bound) 511 { 512 int idx; 513 u64 addr; 514 struct ib_device *dev = umem_odp->umem.ibdev; 515 516 lockdep_assert_held(&umem_odp->umem_mutex); 517 518 virt = max_t(u64, virt, ib_umem_start(umem_odp)); 519 bound = min_t(u64, bound, ib_umem_end(umem_odp)); 520 /* Note that during the run of this function, the 521 * notifiers_count of the MR is > 0, preventing any racing 522 * faults from completion. We might be racing with other 523 * invalidations, so we must make sure we free each page only 524 * once. */ 525 for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) { 526 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; 527 if (umem_odp->page_list[idx]) { 528 struct page *page = umem_odp->page_list[idx]; 529 dma_addr_t dma = umem_odp->dma_list[idx]; 530 dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK; 531 532 WARN_ON(!dma_addr); 533 534 ib_dma_unmap_page(dev, dma_addr, 535 BIT(umem_odp->page_shift), 536 DMA_BIDIRECTIONAL); 537 if (dma & ODP_WRITE_ALLOWED_BIT) { 538 struct page *head_page = compound_head(page); 539 /* 540 * set_page_dirty prefers being called with 541 * the page lock. However, MMU notifiers are 542 * called sometimes with and sometimes without 543 * the lock. We rely on the umem_mutex instead 544 * to prevent other mmu notifiers from 545 * continuing and allowing the page mapping to 546 * be removed. 547 */ 548 set_page_dirty(head_page); 549 } 550 umem_odp->page_list[idx] = NULL; 551 umem_odp->dma_list[idx] = 0; 552 umem_odp->npages--; 553 } 554 } 555 } 556 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages); 557