/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function allows mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);

void mmu_notifier_synchronize(void)
{
	/* Wait for any running method to finish. */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any
		 * more sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning
	 * to exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}
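
/*
 * Illustrative sketch, not part of this file: one way a listener could
 * pair ->release with mmu_notifier_call_srcu() above, tearing down all
 * secondary mappings (sptes) immediately while deferring the freeing of
 * its bookkeeping until concurrent SRCU readers are done. All my_*
 * names (my_notifier, my_cache, my_zap_all, ...) are hypothetical.
 */
#if 0
struct my_cache {
	struct rcu_head rcu;
	/* shadow page tables / spte bookkeeping would live here */
};

struct my_notifier {
	struct mmu_notifier mn;
	struct my_cache *cache;
};

static void my_cache_free(struct rcu_head *rcu)
{
	/* Runs from SRCU callback context: must be quick and not block. */
	kfree(container_of(rcu, struct my_cache, rcu));
}

static void my_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_notifier *p = container_of(mn, struct my_notifier, mn);

	my_zap_all(p);	/* flush every spte; no new ones may be created */
	mmu_notifier_call_srcu(&p->cache->rcu, my_cache_free);
}
#endif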
/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
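
/*
 * Illustrative sketch, not part of this file: the fallback described in
 * the comment above __mmu_notifier_clear_flush_young(), for a secondary
 * MMU without a hardware young/accessed bit. my_notifier and
 * my_zap_range are hypothetical driver helpers.
 */
#if 0
static int my_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	struct my_notifier *p = container_of(mn, struct my_notifier, mn);

	/*
	 * No hardware accessed bit to clear: zap the secondary mappings
	 * in [start, end) and return 1 if any existed, so the range is
	 * treated as recently referenced. The mappings fault back in on
	 * the next access by the secondary MMU.
	 */
	return my_zap_range(p, start, end) ? 1 : 0;
}
#endif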
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too, so that a subsystem need
		 * not register an invalidate_range_end callback when it
		 * already registers invalidate_range. Usually a subsystem
		 * registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 */
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
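
/*
 * Illustrative sketch, not part of this file: the registration protocol
 * from the comment above, for an mm other than current->mm. The
 * mm_users pin taken by get_task_mm() keeps mmu_notifier_release() at
 * bay while we register; mmput() drops it once mmu_notifier_register()
 * has taken its own mm_count pin. my_attach, my_notifier and
 * my_notifier_ops are hypothetical.
 */
#if 0
static int my_attach(struct my_notifier *p, struct task_struct *task)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(task);		/* pins mm_users */
	if (!mm)
		return -ESRCH;

	p->mn.ops = &my_notifier_ops;
	ret = mmu_notifier_register(&p->mn, mm);

	mmput(mm);			/* mm_count pin survives on success */
	return ret;
}
#endif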
/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() has returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returns are we guaranteed
 * that ->release or any other method can no longer run.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Cannot use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

/*
 * Same as mmu_notifier_unregister but with no callback and no srcu
 * synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Cannot use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}
subsys_initcall(mmu_notifier_init);
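
/*
 * Illustrative sketch, not part of this file: a minimal ops table and
 * teardown path tying the examples above together. A subsystem usually
 * implements either the invalidate_range_start()/end() pair or
 * invalidate_range(), plus ->release. All my_* names are hypothetical,
 * including the my_invalidate_range_start/end callbacks, which are not
 * sketched here.
 */
#if 0
static const struct mmu_notifier_ops my_notifier_ops = {
	.release		= my_notifier_release,
	.clear_flush_young	= my_clear_flush_young,
	.invalidate_range_start	= my_invalidate_range_start,
	.invalidate_range_end	= my_invalidate_range_end,
};

static void my_detach(struct my_notifier *p, struct mm_struct *mm)
{
	/*
	 * Drops the mm_count pin taken at registration. Once this
	 * returns, no callback in my_notifier_ops can still be running.
	 */
	mmu_notifier_unregister(&p->mn, mm);
}
#endif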