/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/pagemap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

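/*
 * Page-fault/invalidation synchronization follows the classic MMU
 * notifier retry scheme: notifiers_count counts invalidations currently
 * in flight over the umem, and notifiers_seq is bumped each time one
 * completes. A fault handler samples notifiers_seq before faulting and
 * later calls ib_umem_mmu_notifier_retry() under umem_mutex; a changed
 * sequence number (or a non-zero count) means an invalidation raced
 * with the fault and the mapping must be retried. Both fields are
 * protected by umem_mutex.
 */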
static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
        mutex_lock(&umem_odp->umem_mutex);
        if (umem_odp->notifiers_count++ == 0)
                /*
                 * Initialize the completion object for waiting on
                 * notifiers. Since notifiers_count is zero, no one should be
                 * waiting right now.
                 */
                reinit_completion(&umem_odp->notifier_completion);
        mutex_unlock(&umem_odp->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
{
        mutex_lock(&umem_odp->umem_mutex);
        /*
         * This sequence increase will notify the QP page fault that the page
         * that is going to be mapped in the spte could have been freed.
         */
        ++umem_odp->notifiers_seq;
        if (--umem_odp->notifiers_count == 0)
                complete_all(&umem_odp->notifier_completion);
        mutex_unlock(&umem_odp->umem_mutex);
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct ib_ucontext_per_mm *per_mm =
                container_of(mn, struct ib_ucontext_per_mm, mn);
        struct rb_node *node;

        down_read(&per_mm->umem_rwsem);
        if (!per_mm->active)
                goto out;

        for (node = rb_first_cached(&per_mm->umem_tree); node;
             node = rb_next(node)) {
                struct ib_umem_odp *umem_odp =
                        rb_entry(node, struct ib_umem_odp, interval_tree.rb);

                /*
                 * Increase the number of notifiers running, to prevent any
                 * further fault handling on this MR.
                 */
                ib_umem_notifier_start_account(umem_odp);
                complete_all(&umem_odp->notifier_completion);
                umem_odp->umem.context->device->ops.invalidate_range(
                        umem_odp, ib_umem_start(umem_odp),
                        ib_umem_end(umem_odp));
        }

out:
        up_read(&per_mm->umem_rwsem);
}

static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
                                             u64 start, u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->umem.context->device->ops.invalidate_range(item, start, end);
        return 0;
}

static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                const struct mmu_notifier_range *range)
{
        struct ib_ucontext_per_mm *per_mm =
                container_of(mn, struct ib_ucontext_per_mm, mn);
        int rc;

        if (mmu_notifier_range_blockable(range))
                down_read(&per_mm->umem_rwsem);
        else if (!down_read_trylock(&per_mm->umem_rwsem))
                return -EAGAIN;

        if (!per_mm->active) {
                up_read(&per_mm->umem_rwsem);
                /*
                 * At this point active is permanently set and visible to this
                 * CPU without a lock; that fact is relied on to skip the
                 * unlock in range_end.
                 */
                return 0;
        }

        rc = rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
                                           range->end,
                                           invalidate_range_start_trampoline,
                                           mmu_notifier_range_blockable(range),
                                           NULL);
        if (rc)
                up_read(&per_mm->umem_rwsem);
        return rc;
}

static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
                                           u64 end, void *cookie)
{
        ib_umem_notifier_end_account(item);
        return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                const struct mmu_notifier_range *range)
{
        struct ib_ucontext_per_mm *per_mm =
                container_of(mn, struct ib_ucontext_per_mm, mn);

        if (unlikely(!per_mm->active))
                return;

        rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
                                      range->end,
                                      invalidate_range_end_trampoline, true,
                                      NULL);
        up_read(&per_mm->umem_rwsem);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
        .release                    = ib_umem_notifier_release,
        .invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
        .invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};

static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;

        down_write(&per_mm->umem_rwsem);
        interval_tree_remove(&umem_odp->interval_tree, &per_mm->umem_tree);
        complete_all(&umem_odp->notifier_completion);
        up_write(&per_mm->umem_rwsem);
}

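/*
 * A struct ib_ucontext_per_mm is shared by every ODP umem that a given
 * ucontext registers against the same mm_struct: it owns the MMU
 * notifier registration and the interval tree of umems. It is created
 * lazily on first use, refcounted via odp_mrs_count, and torn down when
 * the last ODP umem using it is released.
 */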
static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
                                               struct mm_struct *mm)
{
        struct ib_ucontext_per_mm *per_mm;
        int ret;

        per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
        if (!per_mm)
                return ERR_PTR(-ENOMEM);

        per_mm->context = ctx;
        per_mm->mm = mm;
        per_mm->umem_tree = RB_ROOT_CACHED;
        init_rwsem(&per_mm->umem_rwsem);
        per_mm->active = true;

        rcu_read_lock();
        per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        rcu_read_unlock();

        WARN_ON(mm != current->mm);

        per_mm->mn.ops = &ib_umem_notifiers;
        ret = mmu_notifier_register(&per_mm->mn, per_mm->mm);
        if (ret) {
                dev_err(&ctx->device->dev,
                        "Failed to register mmu_notifier %d\n", ret);
                goto out_pid;
        }

        list_add(&per_mm->ucontext_list, &ctx->per_mm_list);
        return per_mm;

out_pid:
        put_pid(per_mm->tgid);
        kfree(per_mm);
        return ERR_PTR(ret);
}

static struct ib_ucontext_per_mm *get_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext *ctx = umem_odp->umem.context;
        struct ib_ucontext_per_mm *per_mm;

        lockdep_assert_held(&ctx->per_mm_list_lock);

        /*
         * Generally speaking we expect only one or two per_mm in this list,
         * so no reason to optimize this search today.
         */
        list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
                if (per_mm->mm == umem_odp->umem.owning_mm)
                        return per_mm;
        }

        return alloc_per_mm(ctx, umem_odp->umem.owning_mm);
}

static void free_per_mm(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
}

static void put_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
        struct ib_ucontext *ctx = umem_odp->umem.context;
        bool need_free;

        mutex_lock(&ctx->per_mm_list_lock);
        umem_odp->per_mm = NULL;
        per_mm->odp_mrs_count--;
        need_free = per_mm->odp_mrs_count == 0;
        if (need_free)
                list_del(&per_mm->ucontext_list);
        mutex_unlock(&ctx->per_mm_list_lock);

        if (!need_free)
                return;

        /*
         * NOTE! mmu_notifier_unregister() can happen between a start/end
         * callback, resulting in a missing end, and thus an unbalanced
         * lock. This doesn't really matter to us since we are about to kfree
         * the memory that holds the lock, however LOCKDEP doesn't like this.
         */
        down_write(&per_mm->umem_rwsem);
        per_mm->active = false;
        up_write(&per_mm->umem_rwsem);

        WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
        mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
        put_pid(per_mm->tgid);
        mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
}

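/*
 * Initialize the common fields of an ODP umem and hook it into its
 * per_mm. For normal (non-implicit) umems this also sizes the page and
 * DMA arrays and inserts the umem into the per_mm interval tree; note
 * that the tree stores closed intervals, so after the decrement below
 * `last` points at the final byte of the last page rather than one
 * past it.
 */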
static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
                                   struct ib_ucontext_per_mm *per_mm)
{
        struct ib_ucontext *ctx = umem_odp->umem.context;
        int ret;

        umem_odp->umem.is_odp = 1;
        if (!umem_odp->is_implicit_odp) {
                size_t page_size = 1UL << umem_odp->page_shift;
                size_t pages;

                umem_odp->interval_tree.start =
                        ALIGN_DOWN(umem_odp->umem.address, page_size);
                if (check_add_overflow(umem_odp->umem.address,
                                       umem_odp->umem.length,
                                       &umem_odp->interval_tree.last))
                        return -EOVERFLOW;
                umem_odp->interval_tree.last =
                        ALIGN(umem_odp->interval_tree.last, page_size);
                if (unlikely(umem_odp->interval_tree.last < page_size))
                        return -EOVERFLOW;

                pages = (umem_odp->interval_tree.last -
                         umem_odp->interval_tree.start) >>
                        umem_odp->page_shift;
                if (!pages)
                        return -EINVAL;

                /*
                 * Note that the representation of the intervals in the
                 * interval tree considers the ending point as contained in
                 * the interval.
                 */
                umem_odp->interval_tree.last--;

                umem_odp->page_list = kvcalloc(
                        pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
                if (!umem_odp->page_list)
                        return -ENOMEM;

                umem_odp->dma_list = kvcalloc(
                        pages, sizeof(*umem_odp->dma_list), GFP_KERNEL);
                if (!umem_odp->dma_list) {
                        ret = -ENOMEM;
                        goto out_page_list;
                }
        }

        mutex_lock(&ctx->per_mm_list_lock);
        if (!per_mm) {
                per_mm = get_per_mm(umem_odp);
                if (IS_ERR(per_mm)) {
                        ret = PTR_ERR(per_mm);
                        goto out_unlock;
                }
        }
        umem_odp->per_mm = per_mm;
        per_mm->odp_mrs_count++;
        mutex_unlock(&ctx->per_mm_list_lock);

        mutex_init(&umem_odp->umem_mutex);
        init_completion(&umem_odp->notifier_completion);

        if (!umem_odp->is_implicit_odp) {
                down_write(&per_mm->umem_rwsem);
                interval_tree_insert(&umem_odp->interval_tree,
                                     &per_mm->umem_tree);
                up_write(&per_mm->umem_rwsem);
        }
        mmgrab(umem_odp->umem.owning_mm);

        return 0;

out_unlock:
        mutex_unlock(&ctx->per_mm_list_lock);
        kvfree(umem_odp->dma_list);
out_page_list:
        kvfree(umem_odp->page_list);
        return ret;
}

/**
 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
 *
 * Implicit ODP umems do not have a VA range and do not have any page lists.
 * They exist only to hold the per_mm reference to help the driver create
 * child umems.
 *
 * @udata: udata from the syscall being used to create the umem
 * @access: ib_reg_mr access flags
 */
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
                                               int access)
{
        struct ib_ucontext *context =
                container_of(udata, struct uverbs_attr_bundle, driver_udata)
                        ->context;
        struct ib_umem *umem;
        struct ib_umem_odp *umem_odp;
        int ret;

        if (access & IB_ACCESS_HUGETLB)
                return ERR_PTR(-EINVAL);

        if (!context)
                return ERR_PTR(-EIO);
        if (WARN_ON_ONCE(!context->device->ops.invalidate_range))
                return ERR_PTR(-EINVAL);

        umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
        if (!umem_odp)
                return ERR_PTR(-ENOMEM);
        umem = &umem_odp->umem;
        umem->context = context;
        umem->writable = ib_access_writable(access);
        umem->owning_mm = current->mm;
        umem_odp->is_implicit_odp = 1;
        umem_odp->page_shift = PAGE_SHIFT;

        ret = ib_init_umem_odp(umem_odp, NULL);
        if (ret) {
                kfree(umem_odp);
                return ERR_PTR(ret);
        }
        return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);

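/*
 * With implicit ODP a driver typically registers one parent umem that
 * stands for the whole address space and then, from its fault handler,
 * carves out children on demand (sketch; error handling elided,
 * fault_addr/fault_len are illustrative names):
 *
 *	parent = ib_umem_odp_alloc_implicit(udata, access);
 *	...
 *	// in the page-fault path, for the faulting range:
 *	child = ib_umem_odp_alloc_child(parent, fault_addr, fault_len);
 */
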
/**
 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
 *                           parent ODP umem
 *
 * @root: The parent umem enclosing the child. This must be allocated using
 *        ib_umem_odp_alloc_implicit()
 * @addr: The starting userspace VA
 * @size: The length of the userspace VA
 */
struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root,
                                            unsigned long addr, size_t size)
{
        /*
         * Caller must ensure that root cannot be freed during this call.
         */
        struct ib_umem_odp *odp_data;
        struct ib_umem *umem;
        int ret;

        if (WARN_ON(!root->is_implicit_odp))
                return ERR_PTR(-EINVAL);

        odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
        if (!odp_data)
                return ERR_PTR(-ENOMEM);
        umem = &odp_data->umem;
        umem->context = root->umem.context;
        umem->length = size;
        umem->address = addr;
        umem->writable = root->umem.writable;
        umem->owning_mm = root->umem.owning_mm;
        odp_data->page_shift = PAGE_SHIFT;

        ret = ib_init_umem_odp(odp_data, root->per_mm);
        if (ret) {
                kfree(odp_data);
                return ERR_PTR(ret);
        }
        return odp_data;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);

/**
 * ib_umem_odp_get - Create a umem_odp for a userspace va
 *
 * @udata: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 *
 * The driver should use this function when the access flags indicate ODP
 * memory. It avoids pinning; instead, it stores the mm for future page fault
 * handling in conjunction with MMU notifiers.
 */
struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
                                    size_t size, int access)
{
        struct ib_umem_odp *umem_odp;
        struct ib_ucontext *context;
        struct mm_struct *mm;
        int ret;

        if (!udata)
                return ERR_PTR(-EIO);

        context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
                          ->context;
        if (!context)
                return ERR_PTR(-EIO);

        if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)) ||
            WARN_ON_ONCE(!context->device->ops.invalidate_range))
                return ERR_PTR(-EINVAL);

        umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
        if (!umem_odp)
                return ERR_PTR(-ENOMEM);

        umem_odp->umem.context = context;
        umem_odp->umem.length = size;
        umem_odp->umem.address = addr;
        umem_odp->umem.writable = ib_access_writable(access);
        umem_odp->umem.owning_mm = mm = current->mm;

        umem_odp->page_shift = PAGE_SHIFT;
        if (access & IB_ACCESS_HUGETLB) {
                struct vm_area_struct *vma;
                struct hstate *h;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, ib_umem_start(umem_odp));
                if (!vma || !is_vm_hugetlb_page(vma)) {
                        up_read(&mm->mmap_sem);
                        ret = -EINVAL;
                        goto err_free;
                }
                h = hstate_vma(vma);
                umem_odp->page_shift = huge_page_shift(h);
                up_read(&mm->mmap_sem);
        }

        ret = ib_init_umem_odp(umem_odp, NULL);
        if (ret)
                goto err_free;
        return umem_odp;

err_free:
        kfree(umem_odp);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_get);

void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
        /*
         * Ensure that no more pages are mapped in the umem.
         *
         * It is the driver's responsibility to ensure, before calling us,
         * that the hardware will not attempt to access the MR any more.
         */
        if (!umem_odp->is_implicit_odp) {
                ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
                                            ib_umem_end(umem_odp));
                remove_umem_from_per_mm(umem_odp);
                kvfree(umem_odp->dma_list);
                kvfree(umem_odp->page_list);
        }
        put_per_mm(umem_odp);
        mmdrop(umem_odp->umem.owning_mm);
        kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);

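/*
 * Each dma_list entry doubles as address and permission word: the DMA
 * address is page aligned, so the low bits hold the ODP_READ/
 * WRITE_ALLOWED_BIT access flags, and ODP_DMA_ADDR_MASK recovers the
 * bare address when unmapping.
 */
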
/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem_odp->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_user_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
                struct ib_umem_odp *umem_odp,
                int page_index,
                struct page *page,
                u64 access_mask,
                unsigned long current_seq)
{
        struct ib_ucontext *context = umem_odp->umem.context;
        struct ib_device *dev = context->device;
        dma_addr_t dma_addr;
        int remove_existing_mapping = 0;
        int ret = 0;

        /*
         * Note: we avoid writing if seq is different from the initial seq, to
         * handle case of a racing notifier. This check also allows us to bail
         * early if we have a notifier running in parallel with us.
         */
        if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
                ret = -EAGAIN;
                goto out;
        }
        if (!(umem_odp->dma_list[page_index])) {
                dma_addr =
                        ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),
                                        DMA_BIDIRECTIONAL);
                if (ib_dma_mapping_error(dev, dma_addr)) {
                        ret = -EFAULT;
                        goto out;
                }
                umem_odp->dma_list[page_index] = dma_addr | access_mask;
                umem_odp->page_list[page_index] = page;
                umem_odp->npages++;
        } else if (umem_odp->page_list[page_index] == page) {
                umem_odp->dma_list[page_index] |= access_mask;
        } else {
                pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
                       umem_odp->page_list[page_index], page);
                /* Better remove the mapping now, to prevent any further
                 * damage. */
                remove_existing_mapping = 1;
        }

out:
        put_user_page(page);

        if (remove_existing_mapping) {
                ib_umem_notifier_start_account(umem_odp);
                dev->ops.invalidate_range(
                        umem_odp,
                        ib_umem_start(umem_odp) +
                                (page_index << umem_odp->page_shift),
                        ib_umem_start(umem_odp) +
                                ((page_index + 1) << umem_odp->page_shift));
                ib_umem_notifier_end_account(umem_odp);
                ret = -EAGAIN;
        }

        return ret;
}

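/*
 * A minimal sketch of how a driver fault handler is expected to use the
 * routine below together with the notifier sequence count (driver-side
 * steps are illustrative; locking and error paths elided):
 *
 *	unsigned long seq = READ_ONCE(umem_odp->notifiers_seq);
 *
 *	npages = ib_umem_odp_map_dma_pages(umem_odp, va, len,
 *					   access_mask, seq);
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem_odp, seq))
 *		<install umem_odp->dma_list entries in the device PTEs>
 *	mutex_unlock(&umem_odp->umem_mutex);
 *	// -EAGAIN or a retry means an invalidation raced; fault again
 */
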
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem_odp->dma_list.
 *
 * Returns the number of pages mapped on success, negative error code
 * for failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and mm was already destroyed.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem_odp->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
                              u64 bcnt, u64 access_mask,
                              unsigned long current_seq)
{
        struct task_struct *owning_process = NULL;
        struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
        struct page **local_page_list = NULL;
        u64 page_mask, off;
        int j, k, ret = 0, start_idx, npages = 0;
        unsigned int flags = 0, page_shift;
        phys_addr_t p = 0;

        if (access_mask == 0)
                return -EINVAL;

        if (user_virt < ib_umem_start(umem_odp) ||
            user_virt + bcnt > ib_umem_end(umem_odp))
                return -EFAULT;

        local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
        if (!local_page_list)
                return -ENOMEM;

        page_shift = umem_odp->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        off = user_virt & (~page_mask);
        user_virt = user_virt & page_mask;
        bcnt += off; /* Charge for the first page offset as well. */

        /*
         * owning_process is allowed to be NULL; this means the mm is somehow
         * outliving the originating process. Presumably mmget_not_zero will
         * fail in this case.
         */
        owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
        if (!owning_process || !mmget_not_zero(owning_mm)) {
                ret = -EINVAL;
                goto out_put_task;
        }

        if (access_mask & ODP_WRITE_ALLOWED_BIT)
                flags |= FOLL_WRITE;

        start_idx = (user_virt - ib_umem_start(umem_odp)) >> page_shift;
        k = start_idx;

        while (bcnt > 0) {
                const size_t gup_num_pages = min_t(size_t,
                                (bcnt + BIT(page_shift) - 1) >> page_shift,
                                PAGE_SIZE / sizeof(struct page *));

                down_read(&owning_mm->mmap_sem);
                /*
                 * Note: this might result in redundant page getting. We can
                 * avoid this by checking dma_list to be 0 before calling
                 * get_user_pages. However, this makes the code much more
                 * complex (and doesn't gain us much performance in most use
                 * cases).
                 */
                npages = get_user_pages_remote(owning_process, owning_mm,
                                user_virt, gup_num_pages,
                                flags, local_page_list, NULL, NULL);
                up_read(&owning_mm->mmap_sem);

                if (npages < 0) {
                        if (npages != -EAGAIN)
                                pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
                        else
                                pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
                        break;
                }

                bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
                mutex_lock(&umem_odp->umem_mutex);
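                /*
                 * For hugetlb umems (page_shift > PAGE_SHIFT) only the first
                 * system page of each device page is handed to the
                 * single-page mapper; the remaining sub-pages are merely
                 * checked to be physically contiguous with it and then
                 * released.
                 */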
                for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
                        if (user_virt & ~page_mask) {
                                p += PAGE_SIZE;
                                if (page_to_phys(local_page_list[j]) != p) {
                                        ret = -EFAULT;
                                        break;
                                }
                                put_user_page(local_page_list[j]);
                                continue;
                        }

                        ret = ib_umem_odp_map_dma_single_page(
                                        umem_odp, k, local_page_list[j],
                                        access_mask, current_seq);
                        if (ret < 0) {
                                if (ret != -EAGAIN)
                                        pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
                                else
                                        pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
                                break;
                        }

                        p = page_to_phys(local_page_list[j]);
                        k++;
                }
                mutex_unlock(&umem_odp->umem_mutex);

                if (ret < 0) {
                        /*
                         * Release pages, remembering that the first page
                         * to hit an error was already released by
                         * ib_umem_odp_map_dma_single_page().
                         */
                        if (npages - (j + 1) > 0)
                                put_user_pages(&local_page_list[j+1],
                                               npages - (j + 1));
                        break;
                }
        }

        if (ret >= 0) {
                if (npages < 0 && k == start_idx)
                        ret = npages;
                else
                        ret = k - start_idx;
        }

        mmput(owning_mm);
out_put_task:
        if (owning_process)
                put_task_struct(owning_process);
        free_page((unsigned long)local_page_list);
        return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                                 u64 bound)
{
        int idx;
        u64 addr;
        struct ib_device *dev = umem_odp->umem.context->device;

        virt = max_t(u64, virt, ib_umem_start(umem_odp));
        bound = min_t(u64, bound, ib_umem_end(umem_odp));
        /*
         * Note that during the run of this function, the notifiers_count of
         * the MR is > 0, preventing any racing faults from completing. We
         * might be racing with other invalidations, so we must make sure we
         * free each page only once.
         */
        mutex_lock(&umem_odp->umem_mutex);
        for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                if (umem_odp->page_list[idx]) {
                        struct page *page = umem_odp->page_list[idx];
                        dma_addr_t dma = umem_odp->dma_list[idx];
                        dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

                        WARN_ON(!dma_addr);

                        ib_dma_unmap_page(dev, dma_addr,
                                          BIT(umem_odp->page_shift),
                                          DMA_BIDIRECTIONAL);
                        if (dma & ODP_WRITE_ALLOWED_BIT) {
                                struct page *head_page = compound_head(page);
                                /*
                                 * set_page_dirty prefers being called with
                                 * the page lock. However, MMU notifiers are
                                 * called sometimes with and sometimes without
                                 * the lock. We rely on the umem_mutex instead
                                 * to prevent other mmu notifiers from
                                 * continuing and allowing the page mapping to
                                 * be removed.
                                 */
                                set_page_dirty(head_page);
                        }
                        umem_odp->page_list[idx] = NULL;
                        umem_odp->dma_list[idx] = 0;
                        umem_odp->npages--;
                }
        }
        mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);

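/*
 * Walk all umems overlapping [start, last). The mmu_notifier callbacks
 * above feed range->start/range->end straight into this. When
 * @blockable is false the walk gives up with -EAGAIN as soon as it
 * finds any overlapping umem (see the TODO below).
 */
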
/*
 * @last is not part of the interval itself; the interval tree stores
 * closed intervals, which is why the iteration below uses last - 1.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
                                  u64 start, u64 last,
                                  umem_call_back cb,
                                  bool blockable,
                                  void *cookie)
{
        int ret_val = 0;
        struct interval_tree_node *node, *next;
        struct ib_umem_odp *umem;

        if (unlikely(start == last))
                return ret_val;

        for (node = interval_tree_iter_first(root, start, last - 1);
             node; node = next) {
                /* TODO move the blockable decision up to the callback */
                if (!blockable)
                        return -EAGAIN;
                next = interval_tree_iter_next(node, start, last - 1);
                umem = container_of(node, struct ib_umem_odp, interval_tree);
                ret_val = cb(umem, start, last, cookie) || ret_val;
        }

        return ret_val;
}