// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static inline bool wait_born(struct super_block *sb)
{
	unsigned int flags;

	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see SB_BORN or SB_DYING after we're woken.
	 */
	flags = smp_load_acquire(&sb->s_flags);
	return flags & (SB_BORN | SB_DYING);
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: This returns true if SB_BORN was set, false if SB_DYING was
 * set. The function acquires s_umount and returns with it held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

relock:
	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING)
		return false;

	/* Has called ->get_tree() successfully. */
	if (sb->s_flags & SB_BORN)
		return true;

	super_unlock(sb, excl);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, wait_born(sb));

	/*
	 * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
	 * Just reacquire @sb->s_umount for the caller.
	 */
	goto relock;
}

/* wait and acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}
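
/*
 * Usage sketch (illustrative, not part of this file): a walker that has
 * taken a temporary reference under sb_lock typically does:
 *
 *	sb->s_count++;
 *	spin_unlock(&sb_lock);
 *	if (super_lock_shared(sb) && sb->s_root)
 *		do_work(sb);			// hypothetical helper
 *	super_unlock_shared(sb);
 *	put_super(sb);
 *
 * super_lock_shared() blocks until SB_BORN or SB_DYING is set, so the
 * body only ever sees a fully built superblock.
 */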

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by anyone who
	 * observes SB_BORN being set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long fs_objects = 0;
	long total_objects;
	long freed = 0;
	long dentries;
	long inodes;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long total_objects = 0;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	free_prealloced_shrinker(&s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block.  alloc_super()
 * returns a pointer to a new superblock or %NULL if allocation had failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink.seeks = DEFAULT_SEEKS;
	s->s_shrink.scan_objects = super_cache_scan;
	s->s_shrink.count_objects = super_cache_count;
	s->s_shrink.batch = 1024;
	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
	if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
		goto fail;
	if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		security_sb_free(s);
		put_user_ns(s->s_user_ns);
		kfree(s->s_subtype);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there are no
 * references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}
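
/*
 * Illustrative sketch (not part of this file): the temporary reference
 * taken via ->s_count is what allows a walker to drop sb_lock and sleep
 * while keeping the super_block memory valid:
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;			// temporary reference
 *	spin_unlock(&sb_lock);
 *	...				// may sleep; sb won't be freed
 *	put_super(sb);			// drop it again
 *
 * A temporary reference keeps only the structure alive; it does not
 * prevent the filesystem from being shut down.
 */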

static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		unregister_shrinker(&s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);
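
/*
 * Illustrative sketch (not part of this file): the usual lifetime of the
 * active reference taken at mount time:
 *
 *	sb = sget_fc(fc, test, set);	// new sb: ->s_active == 1
 *	...				// fill_super, SB_BORN via vfs_get_tree()
 *	deactivate_super(sb);		// umount: last active ref gone,
 *					// ->kill_sb() tears the fs down
 *
 * Dropping the last active reference is what triggers shutdown; temporary
 * references (->s_count) merely pin the structure itself.
 */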

/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference. grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference. grab_super()
 * is called with sb_lock held and drops it. Returns 1 in case of
 * success, 0 if we failed (superblock was already dead or dying when
 * grab_super() was called). Note that this is only called for
 * superblocks not in rundown mode (== ones still on ->fs_supers
 * of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	bool born;

	s->s_count++;
	spin_unlock(&sb_lock);
	born = super_lock_excl(s);
	if (born && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	super_unlock_excl(s);
	put_super(s);
	return 0;
}

static inline bool wait_dead(struct super_block *sb)
{
	unsigned int flags;

	/*
	 * Pairs with memory barrier in super_wake() and ensures
	 * that we see SB_DEAD after we're woken.
	 */
	flags = smp_load_acquire(&sb->s_flags);
	return flags & SB_DEAD;
}

/**
 * grab_super_dead - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 * false if not.
 */
static bool grab_super_dead(struct super_block *sb)
{
	sb->s_count++;
	if (grab_super(sb)) {
		put_super(sb);
		lockdep_assert_held(&sb->s_umount);
		return true;
	}
	wait_var_event(&sb->s_flags, wait_dead(sb));
	lockdep_assert_not_held(&sb->s_umount);
	put_super(sb);
	return false;
}

/*
 * super_trylock_shared - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read(). There are a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}
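
/*
 * Usage sketch (illustrative, not part of this file): shrinker- and
 * writeback-style paths use the trylock variant because they may already
 * hold fs locks and must not block on s_umount:
 *
 *	if (super_trylock_shared(sb)) {
 *		scan_caches(sb);	// hypothetical work
 *		super_unlock_shared(sb);
 *	}
 *
 * Failure simply means "skip this superblock for now" rather than block.
 */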

/**
 * retire_super - prevents superblock from being reused
 * @sb: superblock to retire
 *
 * The function marks superblock to be ignored in superblock test, which
 * prevents it from being reused for any new mounts. If the superblock has
 * a private bdi, it also unregisters it, but doesn't reduce the refcount
 * of the superblock to prevent potential races. The refcount is reduced
 * by generic_shutdown_super(). The function cannot be called
 * concurrently with generic_shutdown_super(). It is safe to call the
 * function multiple times, subsequent calls have no effect.
 *
 * The marker will affect the re-use only for block-device-based
 * superblocks. Other superblocks will still get marked if this function
 * is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects.  Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount();

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc:	Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 * returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

	/*
	 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
	 * not set, as the filesystem is likely unprepared to handle it.
	 * This can happen when fsconfig() is called from init_user_ns with
	 * an fs_fd opened in another user namespace.
	 */
	if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
		errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
		return ERR_PTR(-EPERM);
	}

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	register_shrinker_prepared(&s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super_dead(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
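
/*
 * Illustrative sketch (not part of this file): a hypothetical filesystem
 * keying its superblocks by a device number stashed in fc->fs_private
 * would supply callbacks like these (names are made up):
 *
 *	static int example_test(struct super_block *s, struct fs_context *fc)
 *	{
 *		return s->s_dev == *(dev_t *)fc->fs_private;
 *	}
 *
 *	static int example_set(struct super_block *s, struct fs_context *fc)
 *	{
 *		s->s_dev = *(dev_t *)fc->fs_private;
 *		return 0;
 *	}
 *
 *	sb = sget_fc(fc, example_test, example_set);
 *
 * Compare super_s_dev_test()/super_s_dev_set() further down, which do
 * the same via fc->sget_key.
 */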

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			 int (*test)(struct super_block *, void *),
			 int (*set)(struct super_block *, void *),
			 int flags,
			 void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super_dead(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker_prepared(&s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		/* Pairs with memory barrier in super_wake(). */
		if (smp_load_acquire(&sb->s_flags) & SB_DYING)
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool born;

		sb->s_count++;
		spin_unlock(&sb_lock);

		born = super_lock_shared(sb);
		if (born && sb->s_root)
			f(sb, arg);
		super_unlock_shared(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
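
/*
 * Usage sketch (illustrative, not part of this file): callers pass a
 * callback that runs with the superblock held shared, e.g. a sync-style
 * pass over all filesystems:
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!sb_rdonly(sb))
 *			sync_filesystem(sb);	// runs under s_umount (shared)
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 *
 * The iterator handles the s_count/locking dance shown above, so the
 * callback itself can stay lock-free.
 */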

/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool born;

		sb->s_count++;
		spin_unlock(&sb_lock);

		born = super_lock_shared(sb);
		if (born && sb->s_root)
			f(sb, arg);
		super_unlock_shared(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				return NULL;
			super_unlock_excl(sb);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev == dev) {
			bool born;

			sb->s_count++;
			spin_unlock(&sb_lock);
			/* still alive? */
			born = super_lock(sb, excl);
			if (born && sb->s_root)
				return sb;
			super_unlock(sb, excl);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			break;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb)
{
	bool born = super_lock_excl(sb);

	if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	super_unlock_excl(sb);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb)
{
	bool born = super_lock_excl(sb);

	if (born && sb->s_root) {
		if (IS_ENABLED(CONFIG_BLOCK))
			while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
				pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
		thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
	} else {
		super_unlock_excl(sb);
	}
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);
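
/*
 * Usage sketch (illustrative, not part of this file): a virtual
 * filesystem's fill_super() can obtain an anonymous device number for
 * stat(2)'s st_dev field:
 *
 *	err = get_anon_bdev(&sb->s_dev);	// e.g. dev_t 0:42
 *	...
 *	free_anon_bdev(sb->s_dev);		// on teardown
 *
 * set_anon_super()/kill_anon_super() below wrap exactly this pairing.
 */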

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
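
/*
 * Usage sketch (illustrative, not part of this file): a filesystem that
 * wants one superblock per key (say, per network namespace) calls
 * get_tree_keyed() from its ->get_tree() hook (names are made up):
 *
 *	static int example_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_keyed(fc, example_fill_super, fc->net_ns);
 *	}
 *
 * A second mount with the same key then shares the existing superblock
 * via test_keyed_super() (which compares the key stored in s_fs_info)
 * instead of building a new one.
 */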

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 * pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock a super block that the caller holds a reference to.
 *
 * The caller needs to ensure that the super_block isn't being freed while
 * calling this function, e.g. by holding a lock over the call to this function
 * and the place that clears the pointer to the superblock used by this function
 * before freeing the superblock.
 */
static bool super_lock_shared_active(struct super_block *sb)
{
	bool born = super_lock_shared(sb);

	if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock_shared(sb);
		return false;
	}
	return true;
}

static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	struct super_block *sb = bdev->bd_holder;

	/* bd_holder_lock ensures that the sb isn't freed */
	lockdep_assert_held(&bdev->bd_holder_lock);

	if (!super_lock_shared_active(sb))
		return;

	if (!surprise)
		sync_filesystem(sb);
	shrink_dcache_sb(sb);
	invalidate_inodes(sb);
	if (sb->s_op->shutdown)
		sb->s_op->shutdown(sb);

	super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
	struct super_block *sb = bdev->bd_holder;

	lockdep_assert_held(&bdev->bd_holder_lock);

	if (!super_lock_shared_active(sb))
		return;
	sync_filesystem(sb);
	super_unlock_shared(sb);
}

const struct blk_holder_ops fs_holder_ops = {
	.mark_dead		= fs_bdev_mark_dead,
	.sync			= fs_bdev_sync,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);

int setup_bdev_super(struct super_block *sb, int sb_flags,
		struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct bdev_handle *bdev_handle;
	struct block_device *bdev;

	bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_handle)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_handle);
	}
	bdev = bdev_handle->bdev;

	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_release(bdev_handle);
		return -EACCES;
	}

	/*
	 * Until SB_BORN flag is set, there can be no active superblock
	 * references and thus no filesystem freezing. get_active_super() will
	 * just loop waiting for SB_BORN so even freeze_bdev() cannot proceed.
	 *
	 * It is enough to check bdev was not frozen before we set s_bdev.
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_release(bdev_handle);
		return -EBUSY;
	}
	spin_lock(&sb_lock);
	sb->s_bdev_handle = bdev_handle;
	sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(&sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	struct super_block *s;
	int error = 0;
	dev_t dev;

	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}

	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
		}
	} else {
		/*
		 * We drop s_umount here because we need to open the bdev and
		 * bdev->open_mutex ranks above s_umount (blkdev_put() ->
		 * bdev_mark_dead()). It is safe because we have active sb
		 * reference and SB_BORN is not set yet.
		 */
		super_unlock_excl(s);
		error = setup_bdev_super(s, fc->sb_flags, fc);
		__super_lock_excl(s);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL(get_tree_bdev);

static int test_bdev_super(struct super_block *s, void *data)
{
	return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;
	dev_t dev;

	error = lookup_bdev(dev_name, &dev);
	if (error)
		return ERR_PTR(error);

	flags |= SB_NOSEC;
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
	if (IS_ERR(s))
		return ERR_CAST(s);

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			return ERR_PTR(-EBUSY);
		}
	} else {
		/*
		 * We drop s_umount here because we need to open the bdev and
		 * bdev->open_mutex ranks above s_umount (blkdev_put() ->
		 * bdev_mark_dead()). It is safe because we have active sb
		 * reference and SB_BORN is not set yet.
		 */
		super_unlock_excl(s);
		error = setup_bdev_super(s, flags, NULL);
		__super_lock_excl(s);
		if (!error)
			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}

		s->s_flags |= SB_ACTIVE;
	}

	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_bdev);
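
/*
 * Usage sketch (illustrative, not part of this file): a legacy
 * block-device filesystem wires mount_bdev()/kill_block_super() into its
 * file_system_type (names are made up):
 *
 *	static struct dentry *example_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  example_fill_super);
 *	}
 *
 *	static struct file_system_type example_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *		.mount		= example_mount,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 *
 * New filesystems should prefer the fs_context path (get_tree_bdev()).
 */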

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	generic_shutdown_super(sb);
	if (bdev) {
		sync_blockdev(bdev);
		bdev_release(sb->s_bdev_handle);
	}
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

int reconfigure_single(struct super_block *s,
		       int flags, void *data)
{
	struct fs_context *fc;
	int ret;

	/* The caller really needs to be passing fc down into mount_single(),
	 * then a chunk of this can be removed.  [Bollocks -- AV]
	 * Better yet, reconfiguration shouldn't happen, but rather the second
	 * mount should be rejected if the parameters are not compatible.
	 */
	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	ret = parse_monolithic_mount_data(fc, data);
	if (ret < 0)
		goto out;

	ret = reconfigure_super(fc);
out:
	put_fs_context(fc);
	return ret;
}

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (!error)
			s->s_flags |= SB_ACTIVE;
	} else {
		error = reconfigure_single(s, flags, data);
	}
	if (unlikely(error)) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
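
/*
 * Usage sketch (illustrative, not part of this file): single-instance
 * filesystems pair mount_single() with kill_litter_super() or
 * kill_anon_super() (names are made up):
 *
 *	static struct dentry *example_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_single(fs_type, flags, data, example_fill_super);
 *	}
 *
 * A second mount(2) of such a filesystem reuses the one superblock and
 * goes through reconfigure_single() instead of fill_super().
 */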

/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting.  The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
		       fc->fs_type->name);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * super_wake() contains a memory barrier which also takes care of
	 * ordering for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is the
	 * superblock structure contents that we just set up, not the
	 * SB_BORN flag.
	 */
1787 super_wake(sb, SB_BORN);
1788
1789 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1790 if (unlikely(error)) {
1791 fc_drop_locked(fc);
1792 return error;
1793 }
1794
1795 /*
1796 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1797 * but s_maxbytes was an unsigned long long for many releases. Throw
1798 * this warning for a little while to try and catch filesystems that
1799 * violate this rule.
1800 */
1801 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1802 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
1803
1804 return 0;
1805 }
1806 EXPORT_SYMBOL(vfs_get_tree);
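
/*
 * Illustrative sketch, hypothetical names: the ->get_tree() hook that
 * vfs_get_tree() invokes must leave a referenced root dentry in
 * fc->root.  A filesystem without a backing device would typically
 * delegate to the get_tree_nodev() helper:
 *
 *	static int foofs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_nodev(fc, foofs_fill_super);
 *	}
 *
 * where foofs_fill_super() takes (struct super_block *, struct
 * fs_context *) and sets up sb->s_root.
 */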

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);
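
/*
 * Illustrative sketch, hypothetical names: a fill_super implementation
 * for, say, a network filesystem would register its per-superblock BDI
 * before doing any writeback-related setup:
 *
 *	err = super_setup_bdi_name(sb, "foofs-%s", server_name);
 *	if (err)
 *		return err;
 *
 * No matching teardown call is needed; generic_shutdown_super() drops
 * the BDI because SB_I_PERSB_BDI is set.
 */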

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb, int level)
{
	for (level--; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

static int wait_for_partially_frozen(struct super_block *sb)
{
	int ret = 0;

	do {
		unsigned short old = sb->s_writers.frozen;

		up_write(&sb->s_umount);
		ret = wait_var_event_killable(&sb->s_writers.frozen,
					      sb->s_writers.frozen != old);
		down_write(&sb->s_umount);
	} while (ret == 0 &&
		 sb->s_writers.frozen != SB_UNFROZEN &&
		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);

	return ret;
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 * @who: context that wants to freeze
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs may return
 * -EBUSY.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
 *
 * The @who argument distinguishes between the kernel and userspace trying to
 * freeze the filesystem.  Although there cannot be multiple kernel freezes or
 * multiple userspace freezes in effect at any given time, the kernel and
 * userspace can both hold a filesystem frozen.  The filesystem remains frozen
 * until there are no kernel or userspace freezes in effect.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb, enum freeze_holder who)
{
	int ret;

	atomic_inc(&sb->s_active);
	if (!super_lock_excl(sb))
		WARN(1, "Dying superblock while freezing!");

retry:
	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (sb->s_writers.freeze_holders & who) {
			deactivate_locked_super(sb);
			return -EBUSY;
		}

		WARN_ON(sb->s_writers.freeze_holders == 0);

		/*
		 * Someone else already holds this type of freeze; share the
		 * freeze and assign the active ref to the freeze.
		 */
		sb->s_writers.freeze_holders |= who;
		super_unlock_excl(sb);
		return 0;
	}

	if (sb->s_writers.frozen != SB_UNFROZEN) {
		ret = wait_for_partially_frozen(sb);
		if (ret) {
			deactivate_locked_super(sb);
			return ret;
		}

		goto retry;
	}

	if (!(sb->s_flags & SB_BORN)) {
		super_unlock_excl(sb);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		sb->s_writers.freeze_holders |= who;
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		wake_up_var(&sb->s_writers.frozen);
		super_unlock_excl(sb);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	super_unlock_excl(sb);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	if (!super_lock_excl(sb))
		WARN(1, "Dying superblock while freezing!");

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
		wake_up_var(&sb->s_writers.frozen);
		deactivate_locked_super(sb);
		return ret;
	}

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS: Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb, SB_FREEZE_FS);
			wake_up_var(&sb->s_writers.frozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	sb->s_writers.freeze_holders |= who;
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	wake_up_var(&sb->s_writers.frozen);
	lockdep_sb_freeze_release(sb);
	super_unlock_excl(sb);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
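
/*
 * Illustrative sketch: the freeze levels above pair with the
 * sb_start_*()/sb_end_*() bracketing used by writers.  An ordinary
 * write path blocks at SB_FREEZE_WRITE:
 *
 *	sb_start_write(sb);
 *	...modify the filesystem...
 *	sb_end_write(sb);
 *
 * Page-fault paths bracket with sb_start_pagefault()/sb_end_pagefault()
 * and stop at SB_FREEZE_PAGEFAULT, while internal writers use
 * sb_start_intwrite()/sb_end_intwrite() and stop at SB_FREEZE_FS.
 */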

/*
 * Undoes the effect of a freeze_super() call.  If the filesystem is
 * frozen both by userspace and the kernel, a thaw call from either source
 * removes that state without releasing the other state or unlocking the
 * filesystem.
 */
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
{
	int error;

	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (!(sb->s_writers.freeze_holders & who)) {
			super_unlock_excl(sb);
			return -EINVAL;
		}

		/*
		 * Freeze is shared with someone else.  Release our hold and
		 * drop the active ref that freeze_super assigned to the
		 * freezer.
		 */
		if (sb->s_writers.freeze_holders & ~who) {
			sb->s_writers.freeze_holders &= ~who;
			deactivate_locked_super(sb);
			return 0;
		}
	} else {
		super_unlock_excl(sb);
		return -EINVAL;
	}

	if (sb_rdonly(sb)) {
		sb->s_writers.freeze_holders &= ~who;
		sb->s_writers.frozen = SB_UNFROZEN;
		wake_up_var(&sb->s_writers.frozen);
		goto out;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR "VFS: Filesystem thaw failed\n");
			lockdep_sb_freeze_release(sb);
			super_unlock_excl(sb);
			return error;
		}
	}

	sb->s_writers.freeze_holders &= ~who;
	sb->s_writers.frozen = SB_UNFROZEN;
	wake_up_var(&sb->s_writers.frozen);
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out:
	deactivate_locked_super(sb);
	return 0;
}

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 * @who: context that wants to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super()
 * if there are no remaining freezes on the filesystem.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
 */
int thaw_super(struct super_block *sb, enum freeze_holder who)
{
	if (!super_lock_excl(sb))
		WARN(1, "Dying superblock while thawing!");
	return thaw_super_locked(sb, who);
}
EXPORT_SYMBOL(thaw_super);
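
/*
 * Illustrative sketch: a kernel-internal user freezes and thaws with the
 * same holder, e.g.:
 *
 *	error = freeze_super(sb, FREEZE_HOLDER_KERNEL);
 *	if (!error) {
 *		...operate on the quiesced filesystem...
 *		thaw_super(sb, FREEZE_HOLDER_KERNEL);
 *	}
 *
 * A userspace FIFREEZE/FITHAW pair does the same with
 * FREEZE_HOLDER_USERSPACE; the two holders may overlap without thawing
 * each other.
 */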

/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating the workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}

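/*
 * Illustrative sketch: direct IO code creates this workqueue lazily, so
 * a typical caller checks first and tolerates losing the creation race:
 *
 *	if (!sb->s_dio_done_wq) {
 *		error = sb_init_dio_done_wq(sb);
 *		if (error)
 *			return error;
 *	}
 *
 * The cmpxchg() above keeps the race harmless: the loser destroys its
 * own workqueue and both callers continue with sb->s_dio_done_wq set.
 */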