// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
        /* all mmu notifiers registered in this mm are queued in this list */
        struct hlist_head list;
        bool has_itree;
        /* to serialize the list modifications and hlist_unhashed */
        spinlock_t lock;
        unsigned long invalidate_seq;
        unsigned long active_invalidate_ranges;
        struct rb_root_cached itree;
        wait_queue_head_t wq;
        struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 */
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
        lockdep_assert_held(&subscriptions->lock);
        return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
                         const struct mmu_notifier_range *range,
                         unsigned long *seq)
{
        struct interval_tree_node *node;
        struct mmu_interval_notifier *res = NULL;

        spin_lock(&subscriptions->lock);
        subscriptions->active_invalidate_ranges++;
        node = interval_tree_iter_first(&subscriptions->itree, range->start,
                                        range->end - 1);
        if (node) {
                subscriptions->invalidate_seq |= 1;
                res = container_of(node, struct mmu_interval_notifier,
                                   interval_tree);
        }

        *seq = subscriptions->invalidate_seq;
        spin_unlock(&subscriptions->lock);
        return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
                  const struct mmu_notifier_range *range)
{
        struct interval_tree_node *node;

        node = interval_tree_iter_next(&interval_sub->interval_tree,
                                       range->start, range->end - 1);
        if (!node)
                return NULL;
        return container_of(node, struct mmu_interval_notifier,
                            interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
        struct mmu_interval_notifier *interval_sub;
        struct hlist_node *next;

        spin_lock(&subscriptions->lock);
        if (--subscriptions->active_invalidate_ranges ||
            !mn_itree_is_invalidating(subscriptions)) {
                spin_unlock(&subscriptions->lock);
                return;
        }

        /* Make invalidate_seq even */
        subscriptions->invalidate_seq++;

        hlist_for_each_entry_safe(interval_sub, next,
                                  &subscriptions->deferred_list,
                                  deferred_item) {
                if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
                        interval_tree_insert(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                else
                        interval_tree_remove(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                hlist_del(&interval_sub->deferred_item);
        }
        spin_unlock(&subscriptions->lock);

        wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
        struct mmu_notifier_subscriptions *subscriptions =
                interval_sub->mm->notifier_subscriptions;
        unsigned long seq;
        bool is_invalidating;

        /*
         * The locking looks broadly like this:
         *   mn_itree_inv_start():                 mmu_interval_read_begin():
         *                                         spin_lock
         *                                          seq = READ_ONCE(interval_sub->invalidate_seq);
         *                                          seq == subs->invalidate_seq
         *                                         spin_unlock
         *    spin_lock
         *     seq = ++subscriptions->invalidate_seq
         *    spin_unlock
         *     op->invalidate():
         *       user_lock
         *        mmu_interval_set_seq()
         *         interval_sub->invalidate_seq = seq
         *       user_unlock
         *
         *                          [Required: mmu_interval_read_retry() == true]
         *
         *   mn_itree_inv_end():
         *    spin_lock
         *     seq = ++subscriptions->invalidate_seq
         *    spin_unlock
         *
         *                                         user_lock
         *                                          mmu_interval_read_retry():
         *                                           interval_sub->invalidate_seq != seq
         *                                         user_unlock
         */
        spin_lock(&subscriptions->lock);
        /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
        seq = READ_ONCE(interval_sub->invalidate_seq);
        is_invalidating = seq == subscriptions->invalidate_seq;
        spin_unlock(&subscriptions->lock);

        /*
         * interval_sub->invalidate_seq must always be set to an odd value via
         * mmu_interval_set_seq() using the provided cur_seq from
         * mn_itree_inv_start_range(). This ensures that if seq does wrap we
         * will always clear the below sleep in some reasonable time as
         * subscriptions->invalidate_seq is even in the idle state.
         */
        if (is_invalidating)
                wait_event(subscriptions->wq,
                           READ_ONCE(subscriptions->invalidate_seq) != seq);

        return seq;
}
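
/*
 * Illustrative sketch (not part of this file): the collision-retry loop a
 * driver builds around mmu_interval_read_begin()/mmu_interval_read_retry().
 * "struct my_dev" and the two stand-in helpers are hypothetical driver state
 * and logic; only the notifier calls and the locking pattern come from the
 * API documented above.
 */
struct my_dev {
        struct mmu_interval_notifier interval_sub;
        struct mutex user_lock;         /* the lock serializing SPTE setup/teardown */
};

/* Stand-in: collect the CPU page table state for the range (may sleep). */
static int my_dev_walk_range(struct my_dev *dev) { return 0; }
/* Stand-in: program the device page tables from the collected state. */
static int my_dev_program_sptes(struct my_dev *dev) { return 0; }

static int my_dev_fault(struct my_dev *dev)
{
        unsigned long seq;
        int ret;

again:
        seq = mmu_interval_read_begin(&dev->interval_sub);

        ret = my_dev_walk_range(dev);           /* may sleep / fault pages */
        if (ret)
                return ret;

        mutex_lock(&dev->user_lock);
        if (mmu_interval_read_retry(&dev->interval_sub, seq)) {
                /* An invalidation ran concurrently; throw the work away and retry. */
                mutex_unlock(&dev->user_lock);
                goto again;
        }
        ret = my_dev_program_sptes(dev);        /* no invalidation can be in flight */
        mutex_unlock(&dev->user_lock);
        return ret;
}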

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
                             struct mm_struct *mm)
{
        struct mmu_notifier_range range = {
                .flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
                .event = MMU_NOTIFY_RELEASE,
                .mm = mm,
                .start = 0,
                .end = ULONG_MAX,
        };
        struct mmu_interval_notifier *interval_sub;
        unsigned long cur_seq;
        bool ret;

        for (interval_sub =
                     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
             interval_sub;
             interval_sub = mn_itree_inv_next(interval_sub, &range)) {
                ret = interval_sub->ops->invalidate(interval_sub, &range,
                                                    cur_seq);
                WARN_ON(!ret);
        }

        mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
                             struct mm_struct *mm)
{
        struct mmu_notifier *subscription;
        int id;

        /*
         * SRCU here will block mmu_notifier_unregister until
         * ->release returns.
         */
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu))
                /*
                 * If ->release runs before mmu_notifier_unregister it must be
                 * handled, as it's the only way for the driver to flush all
                 * existing sptes and stop the driver from establishing any more
                 * sptes before all the pages in the mm are freed.
                 */
                if (subscription->ops->release)
                        subscription->ops->release(subscription, mm);

        spin_lock(&subscriptions->lock);
        while (unlikely(!hlist_empty(&subscriptions->list))) {
                subscription = hlist_entry(subscriptions->list.first,
                                           struct mmu_notifier, hlist);
                /*
                 * We arrived before mmu_notifier_unregister so
                 * mmu_notifier_unregister will do nothing other than to wait
                 * for ->release to finish and for mmu_notifier_unregister to
                 * return.
                 */
                hlist_del_init_rcu(&subscription->hlist);
        }
        spin_unlock(&subscriptions->lock);
        srcu_read_unlock(&srcu, id);

        /*
         * synchronize_srcu here prevents mmu_notifier_release from returning to
         * exit_mmap (which would proceed with freeing all pages in the mm)
         * until the ->release method returns, if it was invoked by
         * mmu_notifier_unregister.
         */
        synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
        struct mmu_notifier_subscriptions *subscriptions =
                mm->notifier_subscriptions;

        if (subscriptions->has_itree)
                mn_itree_release(subscriptions, mm);

        if (!hlist_empty(&subscriptions->list))
                mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending if the mapping previously
 * existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        struct mmu_notifier *subscription;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_flush_young)
                        young |= subscription->ops->clear_flush_young(
                                subscription, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}
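
/*
 * Illustrative sketch (not part of this file): what a secondary-MMU driver's
 * ->clear_flush_young() callback looks like from the other side.  The body is
 * a hypothetical placeholder; only the callback signature used by the loop
 * above comes from struct mmu_notifier_ops.
 */
static int my_dev_clear_flush_young(struct mmu_notifier *subscription,
                                    struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
{
        /*
         * A real driver would container_of() back to its own state here and
         * test-and-clear the accessed bits of its mappings in [start, end).
         */
        return 0;       /* 0: not accessed since last check, 1: accessed */
}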

int __mmu_notifier_clear_young(struct mm_struct *mm,
                               unsigned long start,
                               unsigned long end)
{
        struct mmu_notifier *subscription;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_young)
                        young |= subscription->ops->clear_young(subscription,
                                                                mm, start, end);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
                              unsigned long address)
{
        struct mmu_notifier *subscription;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->test_young) {
                        young = subscription->ops->test_young(subscription, mm,
                                                              address);
                        if (young)
                                break;
                }
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                               pte_t pte)
{
        struct mmu_notifier *subscription;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->change_pte)
                        subscription->ops->change_pte(subscription, mm, address,
                                                      pte);
        }
        srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
                               const struct mmu_notifier_range *range)
{
        struct mmu_interval_notifier *interval_sub;
        unsigned long cur_seq;

        for (interval_sub =
                     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
             interval_sub;
             interval_sub = mn_itree_inv_next(interval_sub, range)) {
                bool ret;

                ret = interval_sub->ops->invalidate(interval_sub, range,
                                                    cur_seq);
                if (!ret) {
                        if (WARN_ON(mmu_notifier_range_blockable(range)))
                                continue;
                        goto out_would_block;
                }
        }
        return 0;

out_would_block:
        /*
         * On -EAGAIN the non-blocking caller is not allowed to call
         * invalidate_range_end()
         */
        mn_itree_inv_end(subscriptions);
        return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
        struct mmu_notifier_subscriptions *subscriptions,
        struct mmu_notifier_range *range)
{
        struct mmu_notifier *subscription;
        int ret = 0;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                const struct mmu_notifier_ops *ops = subscription->ops;

                if (ops->invalidate_range_start) {
                        int _ret;

                        _ret = ops->invalidate_range_start(subscription, range);
                        if (_ret) {
                                pr_info("%pS callback failed with %d in %sblockable context.\n",
                                        ops->invalidate_range_start, _ret,
                                        !mmu_notifier_range_blockable(range) ?
                                                "non-" :
                                                "");
                                WARN_ON(mmu_notifier_range_blockable(range) ||
                                        _ret != -EAGAIN);
                                WARN_ON(ops->invalidate_range_end);
                                ret = _ret;
                        }
                }
        }

        if (ret) {
                /*
                 * Must be non-blocking to get here. If there are multiple
                 * notifiers and one or more failed start, any that succeeded
                 * start are expecting their end to be called. Do so now.
                 */
                hlist_for_each_entry_rcu(subscription, &subscriptions->list,
                                         hlist, srcu_read_lock_held(&srcu)) {
                        if (!subscription->ops->invalidate_range_end)
                                continue;

                        subscription->ops->invalidate_range_end(subscription,
                                                                range);
                }
        }
        srcu_read_unlock(&srcu, id);

        return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;
        int ret;

        if (subscriptions->has_itree) {
                ret = mn_itree_invalidate(subscriptions, range);
                if (ret)
                        return ret;
        }
        if (!hlist_empty(&subscriptions->list))
                return mn_hlist_invalidate_range_start(subscriptions, range);
        return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
                        struct mmu_notifier_range *range)
{
        struct mmu_notifier *subscription;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->invalidate_range_end)
                        subscription->ops->invalidate_range_end(subscription,
                                                                range);
        }
        srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
        struct mmu_notifier_subscriptions *subscriptions =
                range->mm->notifier_subscriptions;

        if (subscriptions->has_itree)
                mn_itree_inv_end(subscriptions);

        if (!hlist_empty(&subscriptions->list))
                mn_hlist_invalidate_end(subscriptions, range);
}
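
/*
 * Illustrative sketch (not part of this file): how a core-mm path brackets a
 * page table update with the two calls above.  The event type and the exact
 * mmu_notifier_range_init() arguments are an assumption for the example; the
 * required start/end pairing is the documented contract.
 */
static void example_zap_range(struct mm_struct *mm,
                              unsigned long start, unsigned long end)
{
        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
        mmu_notifier_invalidate_range_start(&range);

        /* ... unmap the PTEs and flush the CPU TLB here ... */

        mmu_notifier_invalidate_range_end(&range);
}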

void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
                                                   unsigned long start,
                                                   unsigned long end)
{
        struct mmu_notifier *subscription;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 srcu_read_lock_held(&srcu)) {
                if (subscription->ops->arch_invalidate_secondary_tlbs)
                        subscription->ops->arch_invalidate_secondary_tlbs(
                                subscription, mm,
                                start, end);
        }
        srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_register(struct mmu_notifier *subscription,
                            struct mm_struct *mm)
{
        struct mmu_notifier_subscriptions *subscriptions = NULL;
        int ret;

        mmap_assert_write_locked(mm);
        BUG_ON(atomic_read(&mm->mm_users) <= 0);

        /*
         * Subsystems should only register for invalidate_secondary_tlbs() or
         * invalidate_range_start()/end() callbacks, not both.
         */
        if (WARN_ON_ONCE(subscription &&
                         (subscription->ops->arch_invalidate_secondary_tlbs &&
                          (subscription->ops->invalidate_range_start ||
                           subscription->ops->invalidate_range_end))))
                return -EINVAL;

        if (!mm->notifier_subscriptions) {
                /*
                 * kmalloc cannot be called under mm_take_all_locks(), but we
                 * know that mm->notifier_subscriptions can't change while we
                 * hold the write side of the mmap_lock.
                 */
                subscriptions = kzalloc(
                        sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
                if (!subscriptions)
                        return -ENOMEM;

                INIT_HLIST_HEAD(&subscriptions->list);
                spin_lock_init(&subscriptions->lock);
                subscriptions->invalidate_seq = 2;
                subscriptions->itree = RB_ROOT_CACHED;
                init_waitqueue_head(&subscriptions->wq);
                INIT_HLIST_HEAD(&subscriptions->deferred_list);
        }

        ret = mm_take_all_locks(mm);
        if (unlikely(ret))
                goto out_clean;

        /*
         * Serialize the update against mmu_notifier_unregister. A
         * side note: mmu_notifier_release can't run concurrently with
         * us because we hold the mm_users pin (either implicitly as
         * current->mm or explicitly with get_task_mm() or similar).
         * We can't race against any other mmu notifier method either
         * thanks to mm_take_all_locks().
         *
         * release semantics on the initialization of the
         * mmu_notifier_subscriptions's contents are provided for unlocked
         * readers. acquire can only be used while holding the mmgrab or
         * mmget, and is safe because once created the
         * mmu_notifier_subscriptions is not freed until the mm is destroyed.
         */
        if (subscriptions)
                smp_store_release(&mm->notifier_subscriptions, subscriptions);

        if (subscription) {
                /* Pairs with the mmdrop in mmu_notifier_unregister_* */
                mmgrab(mm);
                subscription->mm = mm;
                subscription->users = 1;

                spin_lock(&mm->notifier_subscriptions->lock);
                hlist_add_head_rcu(&subscription->hlist,
                                   &mm->notifier_subscriptions->list);
                spin_unlock(&mm->notifier_subscriptions->lock);
        } else
                mm->notifier_subscriptions->has_itree = true;

        mm_drop_all_locks(mm);
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
        return 0;

out_clean:
        kfree(subscriptions);
        return ret;
}

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_lock nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * While the caller has a mmu_notifier get the subscription->mm pointer will remain
 * valid, and can be converted to an active mm pointer via mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
                          struct mm_struct *mm)
{
        int ret;

        mmap_write_lock(mm);
        ret = __mmu_notifier_register(subscription, mm);
        mmap_write_unlock(mm);
        return ret;
}
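
/*
 * Illustrative sketch (not part of this file): a minimal driver registration
 * using the hlist flavour of the API.  "my_dev_ctx" and the callback bodies
 * are hypothetical; struct mmu_notifier_ops, the callback signatures and
 * mmu_notifier_register() come from the real API.
 */
static void my_dev_release(struct mmu_notifier *subscription,
                           struct mm_struct *mm)
{
        /* Tear down all secondary mappings; the mm is about to be freed. */
}

static int my_dev_invalidate_range_start(struct mmu_notifier *subscription,
                                         const struct mmu_notifier_range *range)
{
        /* Shoot down secondary PTEs covering [range->start, range->end). */
        return 0;
}

static const struct mmu_notifier_ops my_dev_mn_ops = {
        .release                = my_dev_release,
        .invalidate_range_start = my_dev_invalidate_range_start,
};

struct my_dev_ctx {
        struct mmu_notifier notifier;
};

static int my_dev_attach(struct my_dev_ctx *ctx)
{
        ctx->notifier.ops = &my_dev_mn_ops;
        /* Sleeps to take mmap_lock; mm_users must be pinned, e.g. current->mm. */
        return mmu_notifier_register(&ctx->notifier, current->mm);
}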

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
        struct mmu_notifier *subscription;

        spin_lock(&mm->notifier_subscriptions->lock);
        hlist_for_each_entry_rcu(subscription,
                                 &mm->notifier_subscriptions->list, hlist,
                                 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
                if (subscription->ops != ops)
                        continue;

                if (likely(subscription->users != UINT_MAX))
                        subscription->users++;
                else
                        subscription = ERR_PTR(-EOVERFLOW);
                spin_unlock(&mm->notifier_subscriptions->lock);
                return subscription;
        }
        spin_unlock(&mm->notifier_subscriptions->lock);
        return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
 *
 * While the caller has a mmu_notifier get the mm pointer will remain valid,
 * and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
                                             struct mm_struct *mm)
{
        struct mmu_notifier *subscription;
        int ret;

        mmap_assert_write_locked(mm);

        if (mm->notifier_subscriptions) {
                subscription = find_get_mmu_notifier(mm, ops);
                if (subscription)
                        return subscription;
        }

        subscription = ops->alloc_notifier(mm);
        if (IS_ERR(subscription))
                return subscription;
        subscription->ops = ops;
        ret = __mmu_notifier_register(subscription, mm);
        if (ret)
                goto out_free;
        return subscription;
out_free:
        subscription->ops->free_notifier(subscription);
        return ERR_PTR(ret);
}
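
/*
 * Illustrative sketch (not part of this file): the get/put flavour.  The ops
 * provide ->alloc_notifier()/->free_notifier() so a single notifier per
 * (mm, ops) pair is shared by all users.  "struct my_dev_notifier" is a
 * hypothetical wrapper; mmu_notifier_get() is the inline wrapper that takes
 * mmap_lock around mmu_notifier_get_locked().  Drop the reference later with
 * mmu_notifier_put(&dn->mn).
 */
struct my_dev_notifier {
        struct mmu_notifier mn;
};

static struct mmu_notifier *my_dev_alloc_notifier(struct mm_struct *mm)
{
        struct my_dev_notifier *dn = kzalloc(sizeof(*dn), GFP_KERNEL);

        return dn ? &dn->mn : ERR_PTR(-ENOMEM);
}

static void my_dev_free_notifier(struct mmu_notifier *subscription)
{
        kfree(container_of(subscription, struct my_dev_notifier, mn));
}

static const struct mmu_notifier_ops my_dev_get_ops = {
        .alloc_notifier = my_dev_alloc_notifier,
        .free_notifier  = my_dev_free_notifier,
};

static struct my_dev_notifier *my_dev_notifier_get(struct mm_struct *mm)
{
        /* Caller must hold an mm_users pin on mm (e.g. it is current->mm). */
        struct mmu_notifier *mn = mmu_notifier_get(&my_dev_get_ops, mm);

        if (IS_ERR(mn))
                return ERR_CAST(mn);
        return container_of(mn, struct my_dev_notifier, mn);
}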

void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
        BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
        kfree(mm->notifier_subscriptions);
        mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may still be called concurrently with mmu_notifier_invalidate_range_start,
 * for example from a work queue. After exit_mmap runs, it is guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
                             struct mm_struct *mm)
{
        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        if (!hlist_unhashed(&subscription->hlist)) {
                /*
                 * SRCU here will force exit_mmap to wait for ->release to
                 * finish before freeing the pages.
                 */
                int id;

                id = srcu_read_lock(&srcu);
                /*
                 * exit_mmap will block in mmu_notifier_release to guarantee
                 * that ->release is called before freeing the pages.
                 */
                if (subscription->ops->release)
                        subscription->ops->release(subscription, mm);
                srcu_read_unlock(&srcu, id);

                spin_lock(&mm->notifier_subscriptions->lock);
                /*
                 * Can not use list_del_rcu() since __mmu_notifier_release
                 * can delete it before we hold the lock.
                 */
                hlist_del_init_rcu(&subscription->hlist);
                spin_unlock(&mm->notifier_subscriptions->lock);
        }

        /*
         * Wait for any running method to finish, of course including
         * ->release if it was run by mmu_notifier_release instead of us.
         */
        synchronize_srcu(&srcu);

        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        mmdrop(mm);
}

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
        struct mmu_notifier *subscription =
                container_of(rcu, struct mmu_notifier, rcu);
        struct mm_struct *mm = subscription->mm;

        subscription->ops->free_notifier(subscription);
        /* Pairs with the get in __mmu_notifier_register() */
        mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
        struct mm_struct *mm = subscription->mm;

        spin_lock(&mm->notifier_subscriptions->lock);
        if (WARN_ON(!subscription->users) || --subscription->users)
                goto out_unlock;
        hlist_del_init_rcu(&subscription->hlist);
        spin_unlock(&mm->notifier_subscriptions->lock);

        call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
        return;

out_unlock:
        spin_unlock(&mm->notifier_subscriptions->lock);
}

static int __mmu_interval_notifier_insert(
        struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
        struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
        unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
        interval_sub->mm = mm;
        interval_sub->ops = ops;
        RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
        interval_sub->interval_tree.start = start;
        /*
         * Note that the representation of the intervals in the interval tree
         * considers the ending point as contained in the interval.
         */
        if (length == 0 ||
            check_add_overflow(start, length - 1,
                               &interval_sub->interval_tree.last))
                return -EOVERFLOW;

        /* Must call with a mmget() held */
        if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
                return -EINVAL;

        /* pairs with mmdrop in mmu_interval_notifier_remove() */
        mmgrab(mm);

        /*
         * If some invalidate_range_start/end region is going on in parallel
         * we don't know what VA ranges are affected, so we must assume this
         * new range is included.
         *
         * In all cases the value for the interval_sub->invalidate_seq should be
         * odd, see mmu_interval_read_begin()
         */
        spin_lock(&subscriptions->lock);
        if (subscriptions->active_invalidate_ranges) {
                if (mn_itree_is_invalidating(subscriptions))
                        hlist_add_head(&interval_sub->deferred_item,
                                       &subscriptions->deferred_list);
                else {
                        subscriptions->invalidate_seq |= 1;
                        interval_tree_insert(&interval_sub->interval_tree,
                                             &subscriptions->itree);
                }
                interval_sub->invalidate_seq = subscriptions->invalidate_seq;
        } else {
                WARN_ON(mn_itree_is_invalidating(subscriptions));
                interval_sub->invalidate_seq =
                        subscriptions->invalidate_seq - 1;
                interval_tree_insert(&interval_sub->interval_tree,
                                     &subscriptions->itree);
        }
        spin_unlock(&subscriptions->lock);
        return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long length,
                                 const struct mmu_interval_notifier_ops *ops)
{
        struct mmu_notifier_subscriptions *subscriptions;
        int ret;

        might_lock(&mm->mmap_lock);

        subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
        if (!subscriptions || !subscriptions->has_itree) {
                ret = mmu_notifier_register(NULL, mm);
                if (ret)
                        return ret;
                subscriptions = mm->notifier_subscriptions;
        }
        return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
                                              start, length, ops);
}
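
/*
 * Illustrative sketch (not part of this file): an interval subscription.  The
 * ->invalidate() callback must update the sequence number under the driver
 * lock via mmu_interval_set_seq() so that mmu_interval_read_retry() collides.
 * "struct my_range" and its lock are hypothetical driver state; the ops
 * structure, callback signature and insert call come from the real API.
 */
struct my_range {
        struct mmu_interval_notifier interval_sub;
        struct mutex user_lock;
        bool mapped;
};

static bool my_range_invalidate(struct mmu_interval_notifier *interval_sub,
                                const struct mmu_notifier_range *range,
                                unsigned long cur_seq)
{
        struct my_range *r = container_of(interval_sub, struct my_range,
                                          interval_sub);

        if (mmu_notifier_range_blockable(range))
                mutex_lock(&r->user_lock);
        else if (!mutex_trylock(&r->user_lock))
                return false;   /* non-blocking caller gets -EAGAIN and retries */

        mmu_interval_set_seq(interval_sub, cur_seq);
        r->mapped = false;      /* drop the device mapping for this range */
        mutex_unlock(&r->user_lock);
        return true;
}

static const struct mmu_interval_notifier_ops my_range_ops = {
        .invalidate = my_range_invalidate,
};

static int my_range_track(struct my_range *r, struct mm_struct *mm,
                          unsigned long start, unsigned long length)
{
        return mmu_interval_notifier_insert(&r->interval_sub, mm, start,
                                            length, &my_range_ops);
}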

int mmu_interval_notifier_insert_locked(
        struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
        unsigned long start, unsigned long length,
        const struct mmu_interval_notifier_ops *ops)
{
        struct mmu_notifier_subscriptions *subscriptions =
                mm->notifier_subscriptions;
        int ret;

        mmap_assert_write_locked(mm);

        if (!subscriptions || !subscriptions->has_itree) {
                ret = __mmu_notifier_register(NULL, mm);
                if (ret)
                        return ret;
                subscriptions = mm->notifier_subscriptions;
        }
        return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
                                              start, length, ops);
}

static bool
mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
                          unsigned long seq)
{
        bool ret;

        spin_lock(&subscriptions->lock);
        ret = subscriptions->invalidate_seq != seq;
        spin_unlock(&subscriptions->lock);
        return ret;
}

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
        struct mm_struct *mm = interval_sub->mm;
        struct mmu_notifier_subscriptions *subscriptions =
                mm->notifier_subscriptions;
        unsigned long seq = 0;

        might_sleep();

        spin_lock(&subscriptions->lock);
        if (mn_itree_is_invalidating(subscriptions)) {
                /*
                 * remove is being called after insert put this on the
                 * deferred list, but before the deferred list was processed.
                 */
                if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
                        hlist_del(&interval_sub->deferred_item);
                } else {
                        hlist_add_head(&interval_sub->deferred_item,
                                       &subscriptions->deferred_list);
                        seq = subscriptions->invalidate_seq;
                }
        } else {
                WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
                interval_tree_remove(&interval_sub->interval_tree,
                                     &subscriptions->itree);
        }
        spin_unlock(&subscriptions->lock);

        /*
         * The possible sleep on progress in the invalidation requires the
         * caller not hold any locks held by invalidation callbacks.
         */
        if (seq)
                wait_event(subscriptions->wq,
                           mmu_interval_seq_released(subscriptions, seq));

        /* pairs with mmgrab in mmu_interval_notifier_insert() */
        mmdrop(mm);
}

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
        synchronize_srcu(&srcu);
}