/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree_generic.h>
#include <linux/pagemap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

/*
 * The ib_umem interval tree keeps track of the memory regions for which
 * the HW device has requested to receive notifications when the related
 * memory mapping is changed.
 *
 * per_mm->umem_rwsem protects the tree.
 */

static u64 node_start(struct umem_odp_node *n)
{
        struct ib_umem_odp *umem_odp =
                        container_of(n, struct ib_umem_odp, interval_tree);

        return ib_umem_start(&umem_odp->umem);
}

/* Note that the representation of the intervals in the interval tree
 * considers the ending point as contained in the interval, while the
 * function ib_umem_end returns the first address which is not contained
 * in the umem.
 */
static u64 node_last(struct umem_odp_node *n)
{
        struct ib_umem_odp *umem_odp =
                        container_of(n, struct ib_umem_odp, interval_tree);

        return ib_umem_end(&umem_odp->umem) - 1;
}

INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
                     node_start, node_last, static, rbt_ib_umem)

static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
        mutex_lock(&umem_odp->umem_mutex);
        if (umem_odp->notifiers_count++ == 0)
                /*
                 * Initialize the completion object for waiting on
                 * notifiers. Since notifiers_count is zero, no one should be
                 * waiting right now.
                 */
                reinit_completion(&umem_odp->notifier_completion);
        mutex_unlock(&umem_odp->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
{
        mutex_lock(&umem_odp->umem_mutex);
        /*
         * This sequence increase notifies the QP page-fault handler that the
         * page that is going to be mapped in the spte could have been freed.
         */
        ++umem_odp->notifiers_seq;
        if (--umem_odp->notifiers_count == 0)
                complete_all(&umem_odp->notifier_completion);
        mutex_unlock(&umem_odp->umem_mutex);
}
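
/*
 * Illustrative note (not part of this file): the two accounting helpers
 * above pair with ib_umem_mmu_notifier_retry() from <rdma/ib_umem_odp.h>.
 * Called with umem_mutex held, that helper does roughly the following,
 * mirroring the KVM mmu_notifier_retry pattern:
 *
 *	if (unlikely(umem_odp->notifiers_count))
 *		return 1;
 *	if (umem_odp->notifiers_seq != mmu_seq)
 *		return 1;
 *	return 0;
 *
 * A page-fault handler samples notifiers_seq before faulting pages in and
 * re-checks it (under umem_mutex) before installing translations, so any
 * invalidation that started or completed in between forces a retry.
 */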

static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
                                               u64 start, u64 end, void *cookie)
{
        struct ib_umem *umem = &umem_odp->umem;

        /*
         * Increase the number of notifiers running, to
         * prevent any further fault handling on this MR.
         */
        ib_umem_notifier_start_account(umem_odp);
        umem_odp->dying = 1;
        /* Make sure that the fact the umem is dying is out before we release
         * all pending page faults. */
        smp_wmb();
        complete_all(&umem_odp->notifier_completion);
        umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
                                        ib_umem_end(umem));
        return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct ib_ucontext_per_mm *per_mm =
                container_of(mn, struct ib_ucontext_per_mm, mn);

        down_read(&per_mm->umem_rwsem);
        if (per_mm->active)
                rbt_ib_umem_for_each_in_range(
                        &per_mm->umem_tree, 0, ULLONG_MAX,
                        ib_umem_notifier_release_trampoline, true, NULL);
        up_read(&per_mm->umem_rwsem);
}

static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
                                             u64 start, u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->umem.context->invalidate_range(item, start, end);
        return 0;
}

static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                const struct mmu_notifier_range *range)
{
        struct ib_ucontext_per_mm *per_mm =
                container_of(mn, struct ib_ucontext_per_mm, mn);

        if (mmu_notifier_range_blockable(range))
                down_read(&per_mm->umem_rwsem);
        else if (!down_read_trylock(&per_mm->umem_rwsem))
                return -EAGAIN;

        if (!per_mm->active) {
                up_read(&per_mm->umem_rwsem);
                /*
                 * At this point active is permanently set and visible to this
                 * CPU without a lock; that fact is relied on to skip the
                 * unlock in range_end.
                 */
                return 0;
        }

        return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
                                             range->end,
                                             invalidate_range_start_trampoline,
                                             mmu_notifier_range_blockable(range),
                                             NULL);
}

static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
                                           u64 end, void *cookie)
{
        ib_umem_notifier_end_account(item);
        return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                const struct mmu_notifier_range *range)
{
        struct ib_ucontext_per_mm *per_mm =
                container_of(mn, struct ib_ucontext_per_mm, mn);

        if (unlikely(!per_mm->active))
                return;

        rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
                                      range->end,
                                      invalidate_range_end_trampoline, true, NULL);
        up_read(&per_mm->umem_rwsem);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
        .release                    = ib_umem_notifier_release,
        .invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
        .invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};

static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
        struct ib_umem *umem = &umem_odp->umem;

        down_write(&per_mm->umem_rwsem);
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem_odp->interval_tree,
                                   &per_mm->umem_tree);
        up_write(&per_mm->umem_rwsem);
}

static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
        struct ib_umem *umem = &umem_odp->umem;

        down_write(&per_mm->umem_rwsem);
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_remove(&umem_odp->interval_tree,
                                   &per_mm->umem_tree);
        complete_all(&umem_odp->notifier_completion);

        up_write(&per_mm->umem_rwsem);
}

static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
                                               struct mm_struct *mm)
{
        struct ib_ucontext_per_mm *per_mm;
        int ret;

        per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
        if (!per_mm)
                return ERR_PTR(-ENOMEM);

        per_mm->context = ctx;
        per_mm->mm = mm;
        per_mm->umem_tree = RB_ROOT_CACHED;
        init_rwsem(&per_mm->umem_rwsem);
        per_mm->active = true;

        rcu_read_lock();
        per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        rcu_read_unlock();

        WARN_ON(mm != current->mm);

        per_mm->mn.ops = &ib_umem_notifiers;
        ret = mmu_notifier_register(&per_mm->mn, per_mm->mm);
        if (ret) {
                dev_err(&ctx->device->dev,
                        "Failed to register mmu_notifier %d\n", ret);
                goto out_pid;
        }

        list_add(&per_mm->ucontext_list, &ctx->per_mm_list);
        return per_mm;

out_pid:
        put_pid(per_mm->tgid);
        kfree(per_mm);
        return ERR_PTR(ret);
}

static int get_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext *ctx = umem_odp->umem.context;
        struct ib_ucontext_per_mm *per_mm;

        /*
         * Generally speaking we expect only one or two per_mm in this list,
         * so no reason to optimize this search today.
         */
        mutex_lock(&ctx->per_mm_list_lock);
        list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
                if (per_mm->mm == umem_odp->umem.owning_mm)
                        goto found;
        }

        per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
        if (IS_ERR(per_mm)) {
                mutex_unlock(&ctx->per_mm_list_lock);
                return PTR_ERR(per_mm);
        }

found:
        umem_odp->per_mm = per_mm;
        per_mm->odp_mrs_count++;
        mutex_unlock(&ctx->per_mm_list_lock);

        return 0;
}

static void free_per_mm(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
}

static void put_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
        struct ib_ucontext *ctx = umem_odp->umem.context;
        bool need_free;

        mutex_lock(&ctx->per_mm_list_lock);
        umem_odp->per_mm = NULL;
        per_mm->odp_mrs_count--;
        need_free = per_mm->odp_mrs_count == 0;
        if (need_free)
                list_del(&per_mm->ucontext_list);
        mutex_unlock(&ctx->per_mm_list_lock);

        if (!need_free)
                return;

        /*
         * NOTE! mmu_notifier_unregister() can happen between a start/end
         * callback, resulting in a start without a matching end, and thus an
         * unbalanced lock. This doesn't really matter to us since we are
         * about to kfree the memory that holds the lock, however LOCKDEP
         * doesn't like this.
         */
        down_write(&per_mm->umem_rwsem);
        per_mm->active = false;
        up_write(&per_mm->umem_rwsem);

        WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
        mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
        put_pid(per_mm->tgid);
        mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
}

struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root,
                                      unsigned long addr, size_t size)
{
        struct ib_ucontext_per_mm *per_mm = root->per_mm;
        struct ib_ucontext *ctx = per_mm->context;
        struct ib_umem_odp *odp_data;
        struct ib_umem *umem;
        int pages = size >> PAGE_SHIFT;
        int ret;

        odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
        if (!odp_data)
                return ERR_PTR(-ENOMEM);
        umem = &odp_data->umem;
        umem->context    = ctx;
        umem->length     = size;
        umem->address    = addr;
        umem->page_shift = PAGE_SHIFT;
        umem->writable   = root->umem.writable;
        umem->is_odp     = 1;
        odp_data->per_mm = per_mm;
        umem->owning_mm  = per_mm->mm;
        mmgrab(umem->owning_mm);

        mutex_init(&odp_data->umem_mutex);
        init_completion(&odp_data->notifier_completion);

        odp_data->page_list =
                vzalloc(array_size(pages, sizeof(*odp_data->page_list)));
        if (!odp_data->page_list) {
                ret = -ENOMEM;
                goto out_odp_data;
        }

        odp_data->dma_list =
                vzalloc(array_size(pages, sizeof(*odp_data->dma_list)));
        if (!odp_data->dma_list) {
                ret = -ENOMEM;
                goto out_page_list;
        }

        /*
         * Caller must ensure that the umem_odp that the per_mm came from
         * cannot be freed during the call to ib_alloc_odp_umem.
         */
        mutex_lock(&ctx->per_mm_list_lock);
        per_mm->odp_mrs_count++;
        mutex_unlock(&ctx->per_mm_list_lock);
        add_umem_to_per_mm(odp_data);

        return odp_data;

out_page_list:
        vfree(odp_data->page_list);
out_odp_data:
        mmdrop(umem->owning_mm);
        kfree(odp_data);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
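
/*
 * Illustrative sketch only: ib_alloc_odp_umem() is intended for providers
 * that implement implicit ODP, where aligned child umems are carved out of
 * a root umem on demand. A hypothetical caller (CHILD_SIZE is a placeholder,
 * not an API of this file) might do:
 *
 *	struct ib_umem_odp *child;
 *
 *	child = ib_alloc_odp_umem(root_odp, fault_addr & ~(CHILD_SIZE - 1),
 *				  CHILD_SIZE);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *
 * The child shares the root's per_mm and is torn down like any other ODP
 * umem, via ib_umem_odp_release().
 */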

int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
        struct ib_umem *umem = &umem_odp->umem;
        /*
         * NOTE: This must be called in a process context where
         * umem->owning_mm == current->mm
         */
        struct mm_struct *mm = umem->owning_mm;
        int ret_val;

        if (access & IB_ACCESS_HUGETLB) {
                struct vm_area_struct *vma;
                struct hstate *h;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, ib_umem_start(umem));
                if (!vma || !is_vm_hugetlb_page(vma)) {
                        up_read(&mm->mmap_sem);
                        return -EINVAL;
                }
                h = hstate_vma(vma);
                umem->page_shift = huge_page_shift(h);
                up_read(&mm->mmap_sem);
        }

        mutex_init(&umem_odp->umem_mutex);

        init_completion(&umem_odp->notifier_completion);

        if (ib_umem_num_pages(umem)) {
                umem_odp->page_list =
                        vzalloc(array_size(sizeof(*umem_odp->page_list),
                                           ib_umem_num_pages(umem)));
                if (!umem_odp->page_list)
                        return -ENOMEM;

                umem_odp->dma_list =
                        vzalloc(array_size(sizeof(*umem_odp->dma_list),
                                           ib_umem_num_pages(umem)));
                if (!umem_odp->dma_list) {
                        ret_val = -ENOMEM;
                        goto out_page_list;
                }
        }

        ret_val = get_per_mm(umem_odp);
        if (ret_val)
                goto out_dma_list;
        add_umem_to_per_mm(umem_odp);

        return 0;

out_dma_list:
        vfree(umem_odp->dma_list);
out_page_list:
        vfree(umem_odp->page_list);
        return ret_val;
}
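
/*
 * Usage note (an assumption about the surrounding core code, not something
 * defined in this file): in this kernel generation ib_umem_odp_get() is
 * normally reached from ib_umem_get() when an MR is registered with
 * IB_ACCESS_ON_DEMAND, roughly:
 *
 *	umem = ib_umem_get(udata, start, length, access, 0);
 *	(ib_umem_get() detects IB_ACCESS_ON_DEMAND and calls
 *	 ib_umem_odp_get(to_ib_umem_odp(umem), access) internally)
 *
 * Drivers therefore rarely need to call this function directly.
 */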

void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
        struct ib_umem *umem = &umem_odp->umem;

        /*
         * Ensure that no more pages are mapped in the umem.
         *
         * It is the driver's responsibility to ensure, before calling us,
         * that the hardware will not attempt to access the MR any more.
         */
        ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
                                    ib_umem_end(umem));

        remove_umem_from_per_mm(umem_odp);
        put_per_mm(umem_odp);
        vfree(umem_odp->dma_list);
        vfree(umem_odp->page_list);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               The sequence number is taken from
 *               umem_odp->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
                struct ib_umem_odp *umem_odp,
                int page_index,
                struct page *page,
                u64 access_mask,
                unsigned long current_seq)
{
        struct ib_umem *umem = &umem_odp->umem;
        struct ib_device *dev = umem->context->device;
        dma_addr_t dma_addr;
        int remove_existing_mapping = 0;
        int ret = 0;

        /*
         * Note: we avoid writing if seq is different from the initial seq, to
         * handle the case of a racing notifier. This check also allows us to
         * bail early if we have a notifier running in parallel with us.
         */
        if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
                ret = -EAGAIN;
                goto out;
        }
        if (!(umem_odp->dma_list[page_index])) {
                dma_addr = ib_dma_map_page(dev,
                                           page,
                                           0, BIT(umem->page_shift),
                                           DMA_BIDIRECTIONAL);
                if (ib_dma_mapping_error(dev, dma_addr)) {
                        ret = -EFAULT;
                        goto out;
                }
                umem_odp->dma_list[page_index] = dma_addr | access_mask;
                umem_odp->page_list[page_index] = page;
                umem_odp->npages++;
        } else if (umem_odp->page_list[page_index] == page) {
                umem_odp->dma_list[page_index] |= access_mask;
        } else {
                pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
                       umem_odp->page_list[page_index], page);
                /* Better remove the mapping now, to prevent any further
                 * damage. */
                remove_existing_mapping = 1;
        }

out:
        put_page(page);

        if (remove_existing_mapping) {
                ib_umem_notifier_start_account(umem_odp);
                umem->context->invalidate_range(
                        umem_odp,
                        ib_umem_start(umem) + (page_index << umem->page_shift),
                        ib_umem_start(umem) +
                                ((page_index + 1) << umem->page_shift));
                ib_umem_notifier_end_account(umem_odp);
                ret = -EAGAIN;
        }

        return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem_odp->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and mm was already destroyed.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual pages mapped is returned in
 *        the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem_odp->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
                              u64 bcnt, u64 access_mask,
                              unsigned long current_seq)
{
        struct ib_umem *umem = &umem_odp->umem;
        struct task_struct *owning_process = NULL;
        struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
        struct page **local_page_list = NULL;
        u64 page_mask, off;
        int j, k, ret = 0, start_idx, npages = 0, page_shift;
        unsigned int flags = 0;
        phys_addr_t p = 0;

        if (access_mask == 0)
                return -EINVAL;

        if (user_virt < ib_umem_start(umem) ||
            user_virt + bcnt > ib_umem_end(umem))
                return -EFAULT;

        local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
        if (!local_page_list)
                return -ENOMEM;

        page_shift = umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        off = user_virt & (~page_mask);
        user_virt = user_virt & page_mask;
        bcnt += off; /* Charge for the first page offset as well. */

        /*
         * owning_process is allowed to be NULL; this means somehow the mm
         * exists beyond the lifetime of the originating process. Presumably
         * mmget_not_zero will fail in this case.
         */
        owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
        if (!owning_process || !mmget_not_zero(owning_mm)) {
                ret = -EINVAL;
                goto out_put_task;
        }

        if (access_mask & ODP_WRITE_ALLOWED_BIT)
                flags |= FOLL_WRITE;

        start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
        k = start_idx;

        while (bcnt > 0) {
                const size_t gup_num_pages = min_t(size_t,
                                (bcnt + BIT(page_shift) - 1) >> page_shift,
                                PAGE_SIZE / sizeof(struct page *));

                down_read(&owning_mm->mmap_sem);
                /*
                 * Note: this might result in redundant page getting. We can
                 * avoid this by checking dma_list to be 0 before calling
                 * get_user_pages. However, this makes the code much more
                 * complex (and doesn't gain us much performance in most use
                 * cases).
                 */
                npages = get_user_pages_remote(owning_process, owning_mm,
                                user_virt, gup_num_pages,
                                flags, local_page_list, NULL, NULL);
                up_read(&owning_mm->mmap_sem);

                if (npages < 0) {
                        if (npages != -EAGAIN)
                                pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
                        else
                                pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
                        break;
                }

                bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
                mutex_lock(&umem_odp->umem_mutex);
                for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
                        if (user_virt & ~page_mask) {
                                p += PAGE_SIZE;
                                if (page_to_phys(local_page_list[j]) != p) {
                                        ret = -EFAULT;
                                        break;
                                }
                                put_page(local_page_list[j]);
                                continue;
                        }

                        ret = ib_umem_odp_map_dma_single_page(
                                        umem_odp, k, local_page_list[j],
                                        access_mask, current_seq);
                        if (ret < 0) {
                                if (ret != -EAGAIN)
                                        pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
                                else
                                        pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
                                break;
                        }

                        p = page_to_phys(local_page_list[j]);
                        k++;
                }
                mutex_unlock(&umem_odp->umem_mutex);

                if (ret < 0) {
                        /*
                         * Release pages, remembering that the first page
                         * to hit an error was already released by
                         * ib_umem_odp_map_dma_single_page().
                         */
                        if (npages - (j + 1) > 0)
                                release_pages(&local_page_list[j + 1],
                                              npages - (j + 1));
                        break;
                }
        }

        if (ret >= 0) {
                if (npages < 0 && k == start_idx)
                        ret = npages;
                else
                        ret = k - start_idx;
        }

        mmput(owning_mm);
out_put_task:
        if (owning_process)
                put_task_struct(owning_process);
        free_page((unsigned long)local_page_list);
        return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
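
/*
 * Illustrative sketch (an assumption about typical provider usage, not an
 * API defined here): a driver page-fault handler is expected to combine the
 * function above with the notifier sequence counter roughly as follows.
 * update_hw_translation() is a placeholder for the provider's own code, and
 * the smp_rmb() ensures the sequence is read before any page state.
 *
 *	unsigned long current_seq = READ_ONCE(umem_odp->notifiers_seq);
 *	int npages, ret = 0;
 *
 *	smp_rmb();
 *	npages = ib_umem_odp_map_dma_pages(umem_odp, io_virt, bcnt,
 *					   access_mask, current_seq);
 *	if (npages < 0)
 *		return npages;
 *
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem_odp, current_seq))
 *		ret = update_hw_translation(mr, umem_odp);
 *	mutex_unlock(&umem_odp->umem_mutex);
 */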

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                                 u64 bound)
{
        struct ib_umem *umem = &umem_odp->umem;
        int idx;
        u64 addr;
        struct ib_device *dev = umem->context->device;

        virt = max_t(u64, virt, ib_umem_start(umem));
        bound = min_t(u64, bound, ib_umem_end(umem));
        /* Note that during the run of this function, the
         * notifiers_count of the MR is > 0, preventing any racing
         * faults from completion. We might be racing with other
         * invalidations, so we must make sure we free each page only
         * once. */
        mutex_lock(&umem_odp->umem_mutex);
        for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
                idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
                if (umem_odp->page_list[idx]) {
                        struct page *page = umem_odp->page_list[idx];
                        dma_addr_t dma = umem_odp->dma_list[idx];
                        dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

                        WARN_ON(!dma_addr);

                        ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
                                          DMA_BIDIRECTIONAL);
                        if (dma & ODP_WRITE_ALLOWED_BIT) {
                                struct page *head_page = compound_head(page);
                                /*
                                 * set_page_dirty prefers being called with
                                 * the page lock. However, MMU notifiers are
                                 * called sometimes with and sometimes without
                                 * the lock. We rely on the umem_mutex instead
                                 * to prevent other mmu notifiers from
                                 * continuing and allowing the page mapping to
                                 * be removed.
                                 */
                                set_page_dirty(head_page);
                        }
                        umem_odp->page_list[idx] = NULL;
                        umem_odp->dma_list[idx] = 0;
                        umem_odp->npages--;
                }
        }
        mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);

/* @last is not a part of the interval. See comment for function
 * node_last.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
                                  u64 start, u64 last,
                                  umem_call_back cb,
                                  bool blockable,
                                  void *cookie)
{
        int ret_val = 0;
        struct umem_odp_node *node, *next;
        struct ib_umem_odp *umem;

        if (unlikely(start == last))
                return ret_val;

        for (node = rbt_ib_umem_iter_first(root, start, last - 1);
                        node; node = next) {
                /* TODO move the blockable decision up to the callback */
                if (!blockable)
                        return -EAGAIN;
                next = rbt_ib_umem_iter_next(node, start, last - 1);
                umem = container_of(node, struct ib_umem_odp, interval_tree);
                ret_val = cb(umem, start, last, cookie) || ret_val;
        }

        return ret_val;
}
EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);

struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
                                       u64 addr, u64 length)
{
        struct umem_odp_node *node;

        node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
        if (node)
                return container_of(node, struct ib_umem_odp, interval_tree);
        return NULL;
}
EXPORT_SYMBOL(rbt_ib_umem_lookup);
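
/*
 * Illustrative sketch (an assumption about the provider side, not code in
 * this file): the ucontext->invalidate_range() callback invoked by the MMU
 * notifier trampolines above is expected to zap the HW translations covering
 * the range and then drop the core's page references, for example:
 *
 *	static void my_invalidate_range(struct ib_umem_odp *umem_odp,
 *					unsigned long start, unsigned long end)
 *	{
 *		zap_hw_translations(umem_odp, start, end);	(placeholder)
 *		ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
 *	}
 *
 * The notifiers_count/notifiers_seq accounting is handled by the callers in
 * this file, so the callback itself does not need to touch it.
 */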