/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier::release callback to delay a call
 * to a function that will free the appropriate resources. The deferred
 * function must be quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
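
/*
 * Illustrative sketch (not part of this file): a ->release callback cannot
 * immediately free the structure embedding its mmu_notifier, because other
 * notifier methods may still be walking the registration list under SRCU.
 * A hypothetical driver could defer the free past an SRCU grace period like
 * this; "struct example_notifier", "example_destroy_rcu" and
 * "example_release" are made-up names:
 *
 *	struct example_notifier {
 *		struct mmu_notifier mn;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void example_destroy_rcu(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct example_notifier, rcu));
 *	}
 *
 *	static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		struct example_notifier *en =
 *			container_of(mn, struct example_notifier, mn);
 *
 *		mmu_notifier_call_srcu(&en->rcu, example_destroy_rcu);
 *	}
 */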

void mmu_notifier_synchronize(void)
{
	/*
	 * Wait for any queued SRCU callbacks (such as those posted by
	 * mmu_notifier_call_srcu) to finish.
	 */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range() here too, so that a subsystem does
		 * not have to register an invalidate_range_end() callback when
		 * it already registers invalidate_range(). Usually a subsystem
		 * registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 *
		 * The call to invalidate_range() is skipped when the caller
		 * knows it is safe, i.e. when the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is only
		 * valid when invalidate_range() has already been called for
		 * this range under the page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
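
/*
 * Illustrative sketch (not part of this file) of how a caller typically
 * pairs the start/end helpers declared in <linux/mmu_notifier.h>; "mm",
 * "start" and "end" stand for the range being invalidated:
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	... clear and flush the primary page table entries ...
 *	... if that path already called mmu_notifier_invalidate_range()
 *	... under the page table lock, close the window with:
 *	mmu_notifier_invalidate_range_only_end(mm, start, end);
 *	... otherwise close it with mmu_notifier_invalidate_range_end(),
 *	... which also invokes ->invalidate_range() for the range.
 */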

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
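
/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * registering a notifier against its own mm. "example_ops",
 * "example_notifier" and "example_attach" are made-up names, and the
 * callbacks named in the ops table are assumed to be provided by the
 * driver:
 *
 *	static const struct mmu_notifier_ops example_ops = {
 *		.release		= example_release,
 *		.invalidate_range_start	= example_invalidate_range_start,
 *		.invalidate_range_end	= example_invalidate_range_end,
 *	};
 *
 *	static int example_attach(struct example_notifier *en)
 *	{
 *		en->mn.ops = &example_ops;
 *		return mmu_notifier_register(&en->mn, current->mm);
 *	}
 *
 * If the mm is not current->mm it must be pinned, e.g. with get_task_mm(),
 * before registering, and that mm_users pin dropped with mmput() once
 * mmu_notifier_register() has returned.
 */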

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() has returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can no longer run.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Cannot use hlist_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
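
/*
 * Illustrative sketch (not part of this file) of the corresponding teardown
 * path in a hypothetical driver; "example_detach" and "example_drop_all_sptes"
 * are made-up names. All secondary mappings (sptes) must be dropped before
 * unregistering; once mmu_notifier_unregister() has returned, no notifier
 * method can still be running, so the embedding structure may be freed
 * (unless ->release already arranged to free it via mmu_notifier_call_srcu,
 * as in the sketch near the top of this file):
 *
 *	static void example_detach(struct example_notifier *en,
 *				   struct mm_struct *mm)
 *	{
 *		example_drop_all_sptes(en);
 *		mmu_notifier_unregister(&en->mn, mm);
 *		kfree(en);
 *	}
 */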

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Cannot use hlist_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);