/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

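/*
 * Per-umem accounting of running mmu notifiers. While notifiers_count is
 * non-zero, the page-fault path (see ib_umem_mmu_notifier_retry) backs off,
 * and notifiers_seq lets it detect an invalidation that raced with a fault.
 */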
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		int notifiers_count = item->odp_data->notifiers_count++;

		if (notifiers_count == 0)
			/* Initialize the completion object for waiting on
			 * notifiers. Since notifier_count is zero, no one
			 * should be waiting right now. */
			reinit_completion(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		/*
		 * This sequence increase notifies the QP page fault handler
		 * that the page it is about to map into the spte could have
		 * been freed.
		 */
		++item->odp_data->notifiers_seq;
		if (--item->odp_data->notifiers_count == 0)
			complete_all(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
	atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

	if (zero_notifiers &&
	    !list_empty(&context->no_private_counters)) {
		/* No currently running mmu notifiers. Now is the chance to
		 * add private accounting to all previously added umems. */
		struct ib_umem_odp *odp_data, *next;

		/* Prevent concurrent mmu notifiers from working on the
		 * no_private_counters list. */
		down_write(&context->umem_rwsem);

		/* Read the notifier_count again, with the umem_rwsem
		 * semaphore taken for write. */
		if (!atomic_read(&context->notifier_count)) {
			list_for_each_entry_safe(odp_data, next,
						 &context->no_private_counters,
						 no_private_counters) {
				mutex_lock(&odp_data->umem_mutex);
				odp_data->mn_counters_active = true;
				list_del(&odp_data->no_private_counters);
				complete_all(&odp_data->notifier_completion);
				mutex_unlock(&odp_data->umem_mutex);
			}
		}

		up_write(&context->umem_rwsem);
	}
}

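/*
 * Called (via rbt_ib_umem_for_each_in_range) for every umem in the interval
 * tree when the owning mm is being released. Marks the umem as dying, wakes
 * any fault handlers waiting on the completion, and asks the driver to
 * invalidate the whole mapping.
 */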
static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
					       u64 end, void *cookie)
{
	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(item);
	item->odp_data->dying = 1;
	/* Make sure that the fact the umem is dying is visible before we
	 * release all pending page faults. */
	smp_wmb();
	complete_all(&item->odp_data->notifier_completion);
	item->context->invalidate_range(item, ib_umem_start(item),
					ib_umem_end(item));
	return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
				      address + PAGE_SIZE,
				      invalidate_page_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
					     u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, end);
	return 0;
}

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_start_trampoline, NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_end_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

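/*
 * These callbacks are registered once per ib_ucontext, when the first ODP MR
 * is created (see ib_umem_odp_get below).
 */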
static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                    = ib_umem_notifier_release,
	.invalidate_page            = ib_umem_notifier_invalidate_page,
	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};

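/*
 * ib_alloc_odp_umem - allocate an ODP umem covering [addr, addr + size)
 * without pinning any pages. The page_list and dma_list arrays start out
 * empty; pages are only brought in later via ib_umem_odp_map_dma_pages.
 */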
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
				  unsigned long addr,
				  size_t size)
{
	struct ib_umem *umem;
	struct ib_umem_odp *odp_data;
	int pages = size >> PAGE_SHIFT;
	int ret;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->address   = addr;
	umem->page_size = PAGE_SIZE;
	umem->writable  = 1;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data) {
		ret = -ENOMEM;
		goto out_umem;
	}
	odp_data->umem = umem;

	mutex_init(&odp_data->umem_mutex);
	init_completion(&odp_data->notifier_completion);

	odp_data->page_list = vzalloc(pages * sizeof(*odp_data->page_list));
	if (!odp_data->page_list) {
		ret = -ENOMEM;
		goto out_odp_data;
	}

	odp_data->dma_list = vzalloc(pages * sizeof(*odp_data->dma_list));
	if (!odp_data->dma_list) {
		ret = -ENOMEM;
		goto out_page_list;
	}

	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)))
		odp_data->mn_counters_active = true;
	else
		list_add(&odp_data->no_private_counters,
			 &context->no_private_counters);
	up_write(&context->umem_rwsem);

	umem->odp_data = odp_data;

	return umem;

out_page_list:
	vfree(odp_data->page_list);
out_odp_data:
	kfree(odp_data);
out_umem:
	kfree(umem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);

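/*
 * ib_umem_odp_get - set up ODP tracking for an already allocated umem.
 *
 * Allocates the odp_data bookkeeping, inserts the umem into the per-context
 * interval tree and, for the first ODP MR in the context, registers the MMU
 * notifier on the owning mm. Must be called from the process that created
 * the ib_ucontext; ODP MRs cannot be created from a child process.
 */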
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
	int ret_val;
	struct pid *our_pid;
	struct mm_struct *mm = get_task_mm(current);

	if (!mm)
		return -EINVAL;

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->hugetlb = 0;
	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	if (ib_umem_num_pages(umem)) {
		umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
		if (!umem->odp_data->page_list) {
			ret_val = -ENOMEM;
			goto out_odp_data;
		}

		umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					  sizeof(*umem->odp_data->dma_list));
		if (!umem->odp_data->dma_list) {
			ret_val = -ENOMEM;
			goto out_page_list;
		}
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_rwsem semaphore to synchronize.
	 */
	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)) ||
	    context->odp_mrs_count == 1)
		umem->odp_data->mn_counters_active = true;
	else
		list_add(&umem->odp_data->no_private_counters,
			 &context->no_private_counters);
	downgrade_write(&context->umem_rwsem);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		atomic_set(&context->notifier_count, 0);
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lockdep reports a false positive for mmap_sem vs.
		 * umem_rwsem, because it does not model downgrade_write
		 * correctly.
		 */
		lockdep_off();
		ret_val = mmu_notifier_register(&context->mn, mm);
		lockdep_on();
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_rwsem);

	/*
	 * Note that calling mmput can trigger a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_rwsem, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * have released the semaphore.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_rwsem);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}

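/*
 * ib_umem_odp_release - tear down an ODP umem.
 *
 * Unmaps any remaining pages, removes the umem from the interval tree and,
 * when the last ODP MR of the context goes away, unregisters the MMU
 * notifier before freeing the page lists and the umem itself.
 */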
void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;
	if (!umem->odp_data->mn_counters_active) {
		list_del(&umem->odp_data->no_private_counters);
		complete_all(&umem->odp_data->notifier_completion);
	}

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (which take the semaphore for reading) will be able to finish, and
	 * we will eventually be able to obtain the mmu notifiers' SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_rwsem);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm        = NULL;

		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead; the notifiers were
			 * already removed.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead; the notifiers
			 * were already removed.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_rwsem);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page into.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               The sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem *umem,
		int page_index,
		u64 base_virt_addr,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle the case of a racing notifier. This check also allows us to
	 * bail early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem->odp_data->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		umem->npages++;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem->odp_data->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		invalidate_page_trampoline(
			umem,
			base_virt_addr + (page_index * PAGE_SIZE),
			base_virt_addr + ((page_index+1)*PAGE_SIZE),
			NULL);
		ret = -EAGAIN;
	}

	return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and its mm was already destroyed.
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq)
{
	struct task_struct *owning_process  = NULL;
	struct mm_struct   *owning_mm       = NULL;
	struct page       **local_page_list = NULL;
	u64 off;
	int j, k, ret = 0, start_idx, npages = 0;
	u64 base_virt_addr;
	unsigned int flags = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	off = user_virt & (~PAGE_MASK);
	user_virt = user_virt & PAGE_MASK;
	base_virt_addr = user_virt;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -ENOENT;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages =
			min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
			      PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this would make the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		user_virt += npages << PAGE_SHIFT;
		mutex_lock(&umem->odp_data->umem_mutex);
		for (j = 0; j < npages; ++j) {
			ret = ib_umem_odp_map_dma_single_page(
				umem, k, base_virt_addr, local_page_list[j],
				access_mask, current_seq);
			if (ret < 0)
				break;
			k++;
		}
		mutex_unlock(&umem->odp_data->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);

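/*
 * ib_umem_odp_unmap_dma_pages - unmap and release the pages in [virt, bound).
 *
 * Pages that were mapped with write access are marked dirty before being
 * released. The umem_mutex together with the elevated notifiers_count keeps
 * racing page faults from re-mapping pages while we tear them down.
 */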
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt  = max_t(u64, virt,  ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
			umem->npages--;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);