/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

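/*
 * Per-umem notifier accounting:
 *
 * notifiers_count tracks how many MMU notifiers are currently operating on
 * the umem's range; while it is non-zero the page-fault path must not map
 * new pages. notifiers_seq is bumped when an invalidation finishes so that
 * ib_umem_mmu_notifier_retry() can detect an invalidation that raced with a
 * fault. notifier_completion lets waiters block until all running notifiers
 * have finished. The counters only become meaningful once mn_counters_active
 * is set; until then the umem sits on the context's no_private_counters list.
 */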
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		int notifiers_count = item->odp_data->notifiers_count++;

		if (notifiers_count == 0)
			/* Initialize the completion object for waiting on
			 * notifiers. Since notifier_count is zero, no one
			 * should be waiting right now. */
			reinit_completion(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		/*
		 * Incrementing the sequence number notifies the QP page-fault
		 * handler that the page it is about to map could have been
		 * freed.
		 */
		++item->odp_data->notifiers_seq;
		if (--item->odp_data->notifiers_count == 0)
			complete_all(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
	atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

	if (zero_notifiers &&
	    !list_empty(&context->no_private_counters)) {
		/* No currently running mmu notifiers. Now is the chance to
		 * add private accounting to all previously added umems. */
		struct ib_umem_odp *odp_data, *next;

		/* Prevent concurrent mmu notifiers from working on the
		 * no_private_counters list. */
		down_write(&context->umem_rwsem);

		/* Read the notifier_count again, with the umem_rwsem
		 * semaphore taken for write. */
		if (!atomic_read(&context->notifier_count)) {
			list_for_each_entry_safe(odp_data, next,
						 &context->no_private_counters,
						 no_private_counters) {
				mutex_lock(&odp_data->umem_mutex);
				odp_data->mn_counters_active = true;
				list_del(&odp_data->no_private_counters);
				complete_all(&odp_data->notifier_completion);
				mutex_unlock(&odp_data->umem_mutex);
			}
		}

		up_write(&context->umem_rwsem);
	}
}

static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
					       u64 end, void *cookie) {
	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(item);
	item->odp_data->dying = 1;
	/* Make sure the fact that the umem is dying is visible before we
	 * release all pending page faults. */
	smp_wmb();
	complete_all(&item->odp_data->notifier_completion);
	item->context->invalidate_range(item, ib_umem_start(item),
					ib_umem_end(item));
	return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
					     u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, end);
	return 0;
}

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_start_trampoline, NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_end_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                    = ib_umem_notifier_release,
	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};

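/**
 * ib_alloc_odp_umem - Allocate an ODP umem that is not backed by pages
 *                     pinned up front.
 * @context: the IB user context the umem belongs to
 * @addr: the starting userspace virtual address covered by the umem
 * @size: the size of the covered range, in bytes
 *
 * Allocates the umem and its ODP bookkeeping (page and DMA lists) and inserts
 * it into the context's interval tree; pages are only brought in later via
 * ib_umem_odp_map_dma_pages(). Returns the new umem, or an ERR_PTR on failure.
 */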
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
				  unsigned long addr,
				  size_t size)
{
	struct ib_umem *umem;
	struct ib_umem_odp *odp_data;
	int pages = size >> PAGE_SHIFT;
	int ret;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = 1;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data) {
		ret = -ENOMEM;
		goto out_umem;
	}
	odp_data->umem = umem;

	mutex_init(&odp_data->umem_mutex);
	init_completion(&odp_data->notifier_completion);

	odp_data->page_list = vzalloc(pages * sizeof(*odp_data->page_list));
	if (!odp_data->page_list) {
		ret = -ENOMEM;
		goto out_odp_data;
	}

	odp_data->dma_list = vzalloc(pages * sizeof(*odp_data->dma_list));
	if (!odp_data->dma_list) {
		ret = -ENOMEM;
		goto out_page_list;
	}

	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)))
		odp_data->mn_counters_active = true;
	else
		list_add(&odp_data->no_private_counters,
			 &context->no_private_counters);
	up_write(&context->umem_rwsem);

	umem->odp_data = odp_data;

	return umem;

out_page_list:
	vfree(odp_data->page_list);
out_odp_data:
	kfree(odp_data);
out_umem:
	kfree(umem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);

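/**
 * ib_umem_odp_get - Initialize the ODP state of an umem for the calling
 *                   process.
 * @context: the IB user context the umem belongs to
 * @umem: the umem to prepare for on-demand paging
 * @access: the IB access flags requested for the MR
 *
 * Allocates the ODP bookkeeping for @umem, inserts it into the context's
 * interval tree, and registers the context's MMU notifier when this is the
 * first ODP MR of the context. Returns 0 on success or a negative error code.
 */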
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
		    int access)
{
	int ret_val;
	struct pid *our_pid;
	struct mm_struct *mm = get_task_mm(current);

	if (!mm)
		return -EINVAL;

	if (access & IB_ACCESS_HUGETLB) {
		struct vm_area_struct *vma;
		struct hstate *h;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ib_umem_start(umem));
		if (!vma || !is_vm_hugetlb_page(vma)) {
			up_read(&mm->mmap_sem);
			return -EINVAL;
		}
		h = hstate_vma(vma);
		umem->page_shift = huge_page_shift(h);
		up_read(&mm->mmap_sem);
		umem->hugetlb = 1;
	} else {
		umem->hugetlb = 0;
	}

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	if (ib_umem_num_pages(umem)) {
		umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
		if (!umem->odp_data->page_list) {
			ret_val = -ENOMEM;
			goto out_odp_data;
		}

		umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					  sizeof(*umem->odp_data->dma_list));
		if (!umem->odp_data->dma_list) {
			ret_val = -ENOMEM;
			goto out_page_list;
		}
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_rwsem semaphore to synchronize.
	 */
	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)) ||
	    context->odp_mrs_count == 1)
		umem->odp_data->mn_counters_active = true;
	else
		list_add(&umem->odp_data->no_private_counters,
			 &context->no_private_counters);
	downgrade_write(&context->umem_rwsem);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		atomic_set(&context->notifier_count, 0);
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lock-dep detects a false positive for mmap_sem vs.
		 * umem_rwsem, due to not grasping downgrade_write correctly.
		 */
		lockdep_off();
		ret_val = mmu_notifier_register(&context->mn, mm);
		lockdep_on();
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_rwsem);

	/*
	 * Note that doing an mmput can cause a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_rwsem, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * released the semaphore.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_rwsem);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}

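/**
 * ib_umem_odp_release - Tear down the ODP state of an umem and free it.
 * @umem: the ODP umem to release
 *
 * Unmaps any remaining DMA mappings, removes the umem from the context's
 * interval tree, unregisters the MMU notifier when this was the last ODP MR
 * of the context, and frees the umem and its ODP bookkeeping.
 */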
void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;
	if (!umem->odp_data->mn_counters_active) {
		list_del(&umem->odp_data->no_private_counters);
		complete_all(&umem->odp_data->notifier_completion);
	}

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (which take the semaphore for reading) will be able to finish, and
	 * we will eventually be able to obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * or unregister while we do the check.
	 */
	downgrade_write(&context->umem_rwsem);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm        = NULL;

		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead, the notifiers were
			 * already removed.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead, the notifiers were
			 * already removed.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_rwsem);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page into.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               The sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation fails. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem *umem,
		int page_index,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle case of a racing notifier. This check also allows us to bail
	 * early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem->odp_data->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, BIT(umem->page_shift),
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		umem->npages++;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem->odp_data->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		/* Convert the page index to a byte range within the umem. */
		invalidate_page_trampoline(
			umem,
			ib_umem_start(umem) + (page_index << umem->page_shift),
			ib_umem_start(umem) + ((page_index + 1) <<
					       umem->page_shift),
			NULL);
		ret = -EAGAIN;
	}

	return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and its mm was already destroyed.
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq)
{
	struct task_struct *owning_process  = NULL;
	struct mm_struct   *owning_mm       = NULL;
	struct page       **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0, page_shift;
	unsigned int flags = 0;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	page_shift = umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -ENOENT;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				(bcnt + BIT(page_shift) - 1) >> page_shift,
				PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem->odp_data->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0)
				break;

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem->odp_data->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
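
/*
 * A rough sketch of how a driver's page-fault path is expected to use the
 * interface above (illustrative only; the names "mr" and
 * device_update_page_tables() are hypothetical and not part of this file):
 *
 *	unsigned long seq = READ_ONCE(mr->umem->odp_data->notifiers_seq);
 *	int npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
 *					       access_mask, seq);
 *	if (npages == -EAGAIN)
 *		goto retry;	(raced with an invalidation)
 *	if (npages < 0)
 *		return npages;
 *
 *	mutex_lock(&mr->umem->odp_data->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(mr->umem, seq))
 *		device_update_page_tables(mr, io_virt, npages);
 *	mutex_unlock(&mr->umem->odp_data->umem_mutex);
 *
 * The sequence number read before the call, together with the retry check
 * under umem_mutex, keeps the device page tables consistent with the
 * invalidations performed by the MMU notifier callbacks above.
 */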

void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt  = max_t(u64, virt,  ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
			umem->npages--;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);