xref: /openbmc/linux/fs/super.c (revision 479965a2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/super.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  super.c contains code to handle: - mount structures
8  *                                   - super-block tables
9  *                                   - filesystem drivers list
10  *                                   - mount system call
11  *                                   - umount system call
12  *                                   - ustat system call
13  *
14  * GK 2/5/95  -  Changed to support mounting the root fs via NFS
15  *
16  *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
17  *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
18  *  Added options to /proc/mounts:
19  *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
20  *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
21  *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
22  */
23 
24 #include <linux/export.h>
25 #include <linux/slab.h>
26 #include <linux/blkdev.h>
27 #include <linux/mount.h>
28 #include <linux/security.h>
29 #include <linux/writeback.h>		/* for the emergency remount stuff */
30 #include <linux/idr.h>
31 #include <linux/mutex.h>
32 #include <linux/backing-dev.h>
33 #include <linux/rculist_bl.h>
34 #include <linux/fscrypt.h>
35 #include <linux/fsnotify.h>
36 #include <linux/lockdep.h>
37 #include <linux/user_namespace.h>
38 #include <linux/fs_context.h>
39 #include <uapi/linux/mount.h>
40 #include "internal.h"
41 
42 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);
43 
44 static LIST_HEAD(super_blocks);
45 static DEFINE_SPINLOCK(sb_lock);
46 
47 static char *sb_writers_name[SB_FREEZE_LEVELS] = {
48 	"sb_writers",
49 	"sb_pagefaults",
50 	"sb_internal",
51 };
52 
53 static inline void __super_lock(struct super_block *sb, bool excl)
54 {
55 	if (excl)
56 		down_write(&sb->s_umount);
57 	else
58 		down_read(&sb->s_umount);
59 }
60 
61 static inline void super_unlock(struct super_block *sb, bool excl)
62 {
63 	if (excl)
64 		up_write(&sb->s_umount);
65 	else
66 		up_read(&sb->s_umount);
67 }
68 
69 static inline void __super_lock_excl(struct super_block *sb)
70 {
71 	__super_lock(sb, true);
72 }
73 
74 static inline void super_unlock_excl(struct super_block *sb)
75 {
76 	super_unlock(sb, true);
77 }
78 
79 static inline void super_unlock_shared(struct super_block *sb)
80 {
81 	super_unlock(sb, false);
82 }
83 
84 static inline bool wait_born(struct super_block *sb)
85 {
86 	unsigned int flags;
87 
88 	/*
89 	 * Pairs with smp_store_release() in super_wake() and ensures
90 	 * that we see SB_BORN or SB_DYING after we're woken.
91 	 */
92 	flags = smp_load_acquire(&sb->s_flags);
93 	return flags & (SB_BORN | SB_DYING);
94 }
95 
96 /**
97  * super_lock - wait for superblock to become ready and lock it
98  * @sb: superblock to wait for
99  * @excl: whether exclusive access is required
100  *
101  * If the superblock has neither passed through vfs_get_tree() nor
102  * generic_shutdown_super() yet, wait for that to happen. Either superblock
103  * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
104  * woken and we'll see SB_DYING.
105  *
106  * The caller must have acquired a temporary reference on @sb->s_count.
107  *
108  * Return: This returns true if SB_BORN was set, false if SB_DYING was
109  *         set. The function acquires s_umount and returns with it held.
110  */
111 static __must_check bool super_lock(struct super_block *sb, bool excl)
112 {
113 
114 	lockdep_assert_not_held(&sb->s_umount);
115 
116 relock:
117 	__super_lock(sb, excl);
118 
119 	/*
120 	 * Has gone through generic_shutdown_super() in the meantime.
121 	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
122 	 * grab a reference to this. Tell them so.
123 	 */
124 	if (sb->s_flags & SB_DYING)
125 		return false;
126 
127 	/* Has called ->get_tree() successfully. */
128 	if (sb->s_flags & SB_BORN)
129 		return true;
130 
131 	super_unlock(sb, excl);
132 
133 	/* wait until the superblock is ready or dying */
134 	wait_var_event(&sb->s_flags, wait_born(sb));
135 
136 	/*
137 	 * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
138 	 * Just reacquire @sb->s_umount for the caller.
139 	 */
140 	goto relock;
141 }
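
/*
 * Illustrative call pattern (a sketch only; user_get_super() below does
 * exactly this): take a temporary reference under sb_lock before
 * sleeping in super_lock():
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;
 *	spin_unlock(&sb_lock);
 *	if (super_lock_excl(sb))
 *		...;	// superblock is born, s_umount held
 */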
142 
143 /* wait and acquire read-side of @sb->s_umount */
144 static inline bool super_lock_shared(struct super_block *sb)
145 {
146 	return super_lock(sb, false);
147 }
148 
149 /* wait and acquire write-side of @sb->s_umount */
150 static inline bool super_lock_excl(struct super_block *sb)
151 {
152 	return super_lock(sb, true);
153 }
154 
155 /* wake waiters */
156 #define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
157 static void super_wake(struct super_block *sb, unsigned int flag)
158 {
159 	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
160 	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
161 
162 	/*
163 	 * Pairs with smp_load_acquire() in super_lock() to make sure
164 	 * all initializations in the superblock are seen by the waiter
165 	 * that observes SB_BORN being set.
166 	 */
167 	smp_store_release(&sb->s_flags, sb->s_flags | flag);
168 	/*
169 	 * Pairs with the barrier in prepare_to_wait_event() to make sure
170 	 * ___wait_var_event() either sees SB_BORN set or
171 	 * waitqueue_active() check in wake_up_var() sees the waiter.
172 	 */
173 	smp_mb();
174 	wake_up_var(&sb->s_flags);
175 }
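
/*
 * Lifecycle of the flags woken on above: vfs_get_tree() sets SB_BORN
 * once the superblock is fully set up, generic_shutdown_super() sets
 * SB_DYING once the superblock can no longer be used, and
 * kill_super_notify() sets SB_DEAD after unhashing it from ->fs_supers.
 * None of these flags is ever cleared.
 */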
176 
177 /*
178  * One thing we have to be careful of with a per-sb shrinker is that we don't
179  * drop the last active reference to the superblock from within the shrinker.
180  * If that happens we could trigger unregistering the shrinker from within the
181  * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
182  * take a passive reference to the superblock to avoid this from occurring.
183  */
184 static unsigned long super_cache_scan(struct shrinker *shrink,
185 				      struct shrink_control *sc)
186 {
187 	struct super_block *sb;
188 	long	fs_objects = 0;
189 	long	total_objects;
190 	long	freed = 0;
191 	long	dentries;
192 	long	inodes;
193 
194 	sb = container_of(shrink, struct super_block, s_shrink);
195 
196 	/*
197 	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
198 	 * to recurse into the FS that called us in clear_inode() and friends..
199 	 */
200 	if (!(sc->gfp_mask & __GFP_FS))
201 		return SHRINK_STOP;
202 
203 	if (!super_trylock_shared(sb))
204 		return SHRINK_STOP;
205 
206 	if (sb->s_op->nr_cached_objects)
207 		fs_objects = sb->s_op->nr_cached_objects(sb, sc);
208 
209 	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
210 	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
211 	total_objects = dentries + inodes + fs_objects + 1;
212 	if (!total_objects)
213 		total_objects = 1;
214 
215 	/* proportion the scan between the caches */
216 	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
217 	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
218 	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
219 
220 	/*
221 	 * prune the dcache first as the icache is pinned by it, then
222 	 * prune the icache, followed by the filesystem specific caches
223 	 *
224 	 * Ensure that we always scan at least one object - memcg kmem
225 	 * accounting uses this to fully empty the caches.
226 	 */
227 	sc->nr_to_scan = dentries + 1;
228 	freed = prune_dcache_sb(sb, sc);
229 	sc->nr_to_scan = inodes + 1;
230 	freed += prune_icache_sb(sb, sc);
231 
232 	if (fs_objects) {
233 		sc->nr_to_scan = fs_objects + 1;
234 		freed += sb->s_op->free_cached_objects(sb, sc);
235 	}
236 
237 	super_unlock_shared(sb);
238 	return freed;
239 }
240 
241 static unsigned long super_cache_count(struct shrinker *shrink,
242 				       struct shrink_control *sc)
243 {
244 	struct super_block *sb;
245 	long	total_objects = 0;
246 
247 	sb = container_of(shrink, struct super_block, s_shrink);
248 
249 	/*
250 	 * We don't call super_trylock_shared() here as it is a scalability
251 	 * bottleneck, so we're exposed to partial setup state. The shrinker
252 	 * rwsem does not protect filesystem operations backing
253 	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
254 	 * change between super_cache_count and super_cache_scan, so we really
255 	 * don't need locks here.
256 	 *
257 	 * However, if we are currently mounting the superblock, the underlying
258 	 * filesystem might be in a state of partial construction and hence it
259 	 * is dangerous to access it.  super_trylock_shared() uses an SB_BORN check
260 	 * to avoid this situation, so do the same here. The memory barrier is
261 	 * matched with the one in super_wake() as we don't hold locks here.
262 	 */
263 	if (!(sb->s_flags & SB_BORN))
264 		return 0;
265 	smp_rmb();
266 
267 	if (sb->s_op && sb->s_op->nr_cached_objects)
268 		total_objects = sb->s_op->nr_cached_objects(sb, sc);
269 
270 	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
271 	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
272 
273 	if (!total_objects)
274 		return SHRINK_EMPTY;
275 
276 	total_objects = vfs_pressure_ratio(total_objects);
277 	return total_objects;
278 }
279 
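/*
 * Freeing the superblock must happen in process context because
 * percpu_free_rwsem() may sleep; destroy_super_rcu() below therefore
 * punts the final free from the RCU callback to this work item.
 */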
280 static void destroy_super_work(struct work_struct *work)
281 {
282 	struct super_block *s = container_of(work, struct super_block,
283 							destroy_work);
284 	int i;
285 
286 	for (i = 0; i < SB_FREEZE_LEVELS; i++)
287 		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
288 	kfree(s);
289 }
290 
291 static void destroy_super_rcu(struct rcu_head *head)
292 {
293 	struct super_block *s = container_of(head, struct super_block, rcu);
294 	INIT_WORK(&s->destroy_work, destroy_super_work);
295 	schedule_work(&s->destroy_work);
296 }
297 
298 /* Free a superblock that has never been seen by anyone */
299 static void destroy_unused_super(struct super_block *s)
300 {
301 	if (!s)
302 		return;
303 	super_unlock_excl(s);
304 	list_lru_destroy(&s->s_dentry_lru);
305 	list_lru_destroy(&s->s_inode_lru);
306 	security_sb_free(s);
307 	put_user_ns(s->s_user_ns);
308 	kfree(s->s_subtype);
309 	free_prealloced_shrinker(&s->s_shrink);
310 	/* no delays needed */
311 	destroy_super_work(&s->destroy_work);
312 }
313 
314 /**
315  *	alloc_super	-	create new superblock
316  *	@type:	filesystem type superblock should belong to
317  *	@flags: the mount flags
318  *	@user_ns: User namespace for the super_block
319  *
320  *	Allocates and initializes a new &struct super_block.  alloc_super()
321  *	returns a pointer to a new superblock, or %NULL if allocation failed.
322  */
323 static struct super_block *alloc_super(struct file_system_type *type, int flags,
324 				       struct user_namespace *user_ns)
325 {
326 	struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
327 	static const struct super_operations default_op;
328 	int i;
329 
330 	if (!s)
331 		return NULL;
332 
333 	INIT_LIST_HEAD(&s->s_mounts);
334 	s->s_user_ns = get_user_ns(user_ns);
335 	init_rwsem(&s->s_umount);
336 	lockdep_set_class(&s->s_umount, &type->s_umount_key);
337 	/*
338 	 * sget() can have s_umount recursion.
339 	 *
340 	 * When it cannot find a suitable sb, it allocates a new
341 	 * one (this one), and tries again to find a suitable old
342 	 * one.
343 	 *
344 	 * In case that succeeds, it will acquire the s_umount
345 	 * lock of the old one. Since these are clearly distinct
346 	 * locks, and this object isn't exposed yet, there's no
347 	 * risk of deadlocks.
348 	 *
349 	 * Annotate this by putting this lock in a different
350 	 * subclass.
351 	 */
352 	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
353 
354 	if (security_sb_alloc(s))
355 		goto fail;
356 
357 	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
358 		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
359 					sb_writers_name[i],
360 					&type->s_writers_key[i]))
361 			goto fail;
362 	}
363 	s->s_bdi = &noop_backing_dev_info;
364 	s->s_flags = flags;
365 	if (s->s_user_ns != &init_user_ns)
366 		s->s_iflags |= SB_I_NODEV;
367 	INIT_HLIST_NODE(&s->s_instances);
368 	INIT_HLIST_BL_HEAD(&s->s_roots);
369 	mutex_init(&s->s_sync_lock);
370 	INIT_LIST_HEAD(&s->s_inodes);
371 	spin_lock_init(&s->s_inode_list_lock);
372 	INIT_LIST_HEAD(&s->s_inodes_wb);
373 	spin_lock_init(&s->s_inode_wblist_lock);
374 
375 	s->s_count = 1;
376 	atomic_set(&s->s_active, 1);
377 	mutex_init(&s->s_vfs_rename_mutex);
378 	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
379 	init_rwsem(&s->s_dquot.dqio_sem);
380 	s->s_maxbytes = MAX_NON_LFS;
381 	s->s_op = &default_op;
382 	s->s_time_gran = 1000000000;
383 	s->s_time_min = TIME64_MIN;
384 	s->s_time_max = TIME64_MAX;
385 
386 	s->s_shrink.seeks = DEFAULT_SEEKS;
387 	s->s_shrink.scan_objects = super_cache_scan;
388 	s->s_shrink.count_objects = super_cache_count;
389 	s->s_shrink.batch = 1024;
390 	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
391 	if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
392 		goto fail;
393 	if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
394 		goto fail;
395 	if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
396 		goto fail;
397 	return s;
398 
399 fail:
400 	destroy_unused_super(s);
401 	return NULL;
402 }
403 
404 /* Superblock refcounting  */
405 
406 /*
407  * Drop a superblock's refcount.  The caller must hold sb_lock.
408  */
409 static void __put_super(struct super_block *s)
410 {
411 	if (!--s->s_count) {
412 		list_del_init(&s->s_list);
413 		WARN_ON(s->s_dentry_lru.node);
414 		WARN_ON(s->s_inode_lru.node);
415 		WARN_ON(!list_empty(&s->s_mounts));
416 		security_sb_free(s);
417 		put_user_ns(s->s_user_ns);
418 		kfree(s->s_subtype);
419 		call_rcu(&s->rcu, destroy_super_rcu);
420 	}
421 }
422 
423 /**
424  *	put_super	-	drop a temporary reference to superblock
425  *	@sb: superblock in question
426  *
427  *	Drops a temporary reference, frees superblock if there's no
428  *	references left.
429  */
430 void put_super(struct super_block *sb)
431 {
432 	spin_lock(&sb_lock);
433 	__put_super(sb);
434 	spin_unlock(&sb_lock);
435 }
436 
437 static void kill_super_notify(struct super_block *sb)
438 {
439 	lockdep_assert_not_held(&sb->s_umount);
440 
441 	/* already notified earlier */
442 	if (sb->s_flags & SB_DEAD)
443 		return;
444 
445 	/*
446 	 * Remove it from @fs_supers so it isn't found by new
447 	 * sget{_fc}() walkers anymore. Any concurrent mounter still
448 	 * managing to grab a temporary reference is guaranteed to
449 	 * already see SB_DYING and will wait until we notify them about
450 	 * SB_DEAD.
451 	 */
452 	spin_lock(&sb_lock);
453 	hlist_del_init(&sb->s_instances);
454 	spin_unlock(&sb_lock);
455 
456 	/*
457 	 * Let concurrent mounts know that this thing is really dead.
458 	 * We don't need @sb->s_umount here as every concurrent caller
459 	 * will see SB_DYING and either discard the superblock or wait
460 	 * for SB_DEAD.
461 	 */
462 	super_wake(sb, SB_DEAD);
463 }
464 
465 /**
466  *	deactivate_locked_super	-	drop an active reference to superblock
467  *	@s: superblock to deactivate
468  *
469  *	Drops an active reference to superblock, converting it into a temporary
470  *	one if there are no other active references left.  In that case we
471  *	tell fs driver to shut it down and drop the temporary reference we
472  *	had just acquired.
473  *
474  *	Caller holds exclusive lock on superblock; that lock is released.
475  */
476 void deactivate_locked_super(struct super_block *s)
477 {
478 	struct file_system_type *fs = s->s_type;
479 	if (atomic_dec_and_test(&s->s_active)) {
480 		unregister_shrinker(&s->s_shrink);
481 		fs->kill_sb(s);
482 
483 		kill_super_notify(s);
484 
485 		/*
486 		 * Since list_lru_destroy() may sleep, we cannot call it from
487 		 * put_super(), where we hold the sb_lock. Therefore we destroy
488 		 * the lru lists right now.
489 		 */
490 		list_lru_destroy(&s->s_dentry_lru);
491 		list_lru_destroy(&s->s_inode_lru);
492 
493 		put_filesystem(fs);
494 		put_super(s);
495 	} else {
496 		super_unlock_excl(s);
497 	}
498 }
499 
500 EXPORT_SYMBOL(deactivate_locked_super);
501 
502 /**
503  *	deactivate_super	-	drop an active reference to superblock
504  *	@s: superblock to deactivate
505  *
506  *	Variant of deactivate_locked_super(), except that superblock is *not*
507  *	locked by caller.  If we are going to drop the final active reference,
508  *	lock will be acquired prior to that.
509  */
510 void deactivate_super(struct super_block *s)
511 {
512 	if (!atomic_add_unless(&s->s_active, -1, 1)) {
513 		__super_lock_excl(s);
514 		deactivate_locked_super(s);
515 	}
516 }
517 
518 EXPORT_SYMBOL(deactivate_super);
519 
520 /**
521  *	grab_super - acquire an active reference
522  *	@s: reference we are trying to make active
523  *
524  *	Tries to acquire an active reference.  grab_super() is used when we
525  * 	had just found a superblock in super_blocks or fs_type->fs_supers
526  *	and want to turn it into a full-blown active reference.  grab_super()
527  *	is called with sb_lock held and drops it.  Returns 1 in case of
528  *	success, 0 on failure (the superblock was already dead or dying
529  *	when grab_super() was called).  Note that this is only
530  *	called for superblocks not in rundown mode (== ones still on ->fs_supers
531  *	of their type), so increment of ->s_count is OK here.
532  */
533 static int grab_super(struct super_block *s) __releases(sb_lock)
534 {
535 	bool born;
536 
537 	s->s_count++;
538 	spin_unlock(&sb_lock);
539 	born = super_lock_excl(s);
540 	if (born && atomic_inc_not_zero(&s->s_active)) {
541 		put_super(s);
542 		return 1;
543 	}
544 	super_unlock_excl(s);
545 	put_super(s);
546 	return 0;
547 }
548 
549 static inline bool wait_dead(struct super_block *sb)
550 {
551 	unsigned int flags;
552 
553 	/*
554 	 * Pairs with memory barrier in super_wake() and ensures
555 	 * that we see SB_DEAD after we're woken.
556 	 */
557 	flags = smp_load_acquire(&sb->s_flags);
558 	return flags & SB_DEAD;
559 }
560 
561 /**
562  * grab_super_dead - acquire an active reference to a superblock
563  * @sb: superblock to acquire
564  *
565  * Acquire a temporary reference on a superblock and try to trade it for
566  * an active reference. This is used in sget{_fc}() to wait for a
567  * superblock to either become SB_BORN or for it to pass through
568  * ->kill_sb() and be marked as SB_DEAD.
569  *
570  * Return: This returns true if an active reference could be acquired,
571  *         false if not.
572  */
573 static bool grab_super_dead(struct super_block *sb)
574 {
575 
576 	sb->s_count++;
577 	if (grab_super(sb)) {
578 		put_super(sb);
579 		lockdep_assert_held(&sb->s_umount);
580 		return true;
581 	}
582 	wait_var_event(&sb->s_flags, wait_dead(sb));
583 	lockdep_assert_not_held(&sb->s_umount);
584 	put_super(sb);
585 	return false;
586 }
587 
588 /*
589  *	super_trylock_shared - try to grab ->s_umount shared
590  *	@sb: reference we are trying to grab
591  *
592  *	Try to prevent fs shutdown.  This is used in places where we
593  *	cannot take an active reference but we need to ensure that the
594  *	filesystem is not shut down while we are working on it. It returns
595  *	false if we cannot acquire s_umount or if we lose the race and
596  *	filesystem already got into shutdown, and returns true with the s_umount
597  *	lock held in read mode in case of success. On successful return,
598  *	the caller must drop the s_umount lock when done.
599  *
600  *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
601  *	The reason why it's safe is that we are OK with doing trylock instead
602  *	of down_read().  There's a couple of places that are OK with that, but
603  *	it's very much not a general-purpose interface.
604  */
605 bool super_trylock_shared(struct super_block *sb)
606 {
607 	if (down_read_trylock(&sb->s_umount)) {
608 		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
609 		    (sb->s_flags & SB_BORN))
610 			return true;
611 		super_unlock_shared(sb);
612 	}
613 
614 	return false;
615 }
616 
617 /**
618  *	retire_super	-	prevents superblock from being reused
619  *	@sb: superblock to retire
620  *
621  *	The function marks the superblock as ignored in the superblock test,
622  *	which prevents it from being reused for any new mounts.  If the
623  *	superblock has a private bdi, it also unregisters it, but doesn't reduce
624  *	the refcount of the superblock to prevent potential races.  The refcount
625  *	is reduced by generic_shutdown_super().  The function cannot be called
626  *	concurrently with generic_shutdown_super().  It is safe to call the
627  *	function multiple times; subsequent calls have no effect.
628  *
629  *	The marker will affect the re-use only for block-device-based
630  *	superblocks.  Other superblocks will still get marked if this function
631  *	is used, but that will not affect their reusability.
632  */
633 void retire_super(struct super_block *sb)
634 {
635 	WARN_ON(!sb->s_bdev);
636 	__super_lock_excl(sb);
637 	if (sb->s_iflags & SB_I_PERSB_BDI) {
638 		bdi_unregister(sb->s_bdi);
639 		sb->s_iflags &= ~SB_I_PERSB_BDI;
640 	}
641 	sb->s_iflags |= SB_I_RETIRED;
642 	super_unlock_excl(sb);
643 }
644 EXPORT_SYMBOL(retire_super);
645 
646 /**
647  *	generic_shutdown_super	-	common helper for ->kill_sb()
648  *	@sb: superblock to kill
649  *
650  *	generic_shutdown_super() does all fs-independent work on superblock
651  *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
652  *	that need destruction out of superblock, call generic_shutdown_super()
653  *	and release aforementioned objects.  Note: dentries and inodes _are_
654  *	taken care of and do not need specific handling.
655  *
656  *	Upon calling this function, the filesystem may no longer alter or
657  *	rearrange the set of dentries belonging to this super_block, nor may it
658  *	change the attachments of dentries to inodes.
659  */
660 void generic_shutdown_super(struct super_block *sb)
661 {
662 	const struct super_operations *sop = sb->s_op;
663 
664 	if (sb->s_root) {
665 		shrink_dcache_for_umount(sb);
666 		sync_filesystem(sb);
667 		sb->s_flags &= ~SB_ACTIVE;
668 
669 		cgroup_writeback_umount();
670 
671 		/* Evict all inodes with zero refcount. */
672 		evict_inodes(sb);
673 
674 		/*
675 		 * Clean up and evict any inodes that still have references due
676 		 * to fsnotify or the security policy.
677 		 */
678 		fsnotify_sb_delete(sb);
679 		security_sb_delete(sb);
680 
681 		/*
682 		 * Now that all potentially-encrypted inodes have been evicted,
683 		 * the fscrypt keyring can be destroyed.
684 		 */
685 		fscrypt_destroy_keyring(sb);
686 
687 		if (sb->s_dio_done_wq) {
688 			destroy_workqueue(sb->s_dio_done_wq);
689 			sb->s_dio_done_wq = NULL;
690 		}
691 
692 		if (sop->put_super)
693 			sop->put_super(sb);
694 
695 		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
696 				"VFS: Busy inodes after unmount of %s (%s)",
697 				sb->s_id, sb->s_type->name)) {
698 			/*
699 			 * Adding a proper bailout path here would be hard, but
700 			 * we can at least make it more likely that a later
701 			 * iput_final() or such crashes cleanly.
702 			 */
703 			struct inode *inode;
704 
705 			spin_lock(&sb->s_inode_list_lock);
706 			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
707 				inode->i_op = VFS_PTR_POISON;
708 				inode->i_sb = VFS_PTR_POISON;
709 				inode->i_mapping = VFS_PTR_POISON;
710 			}
711 			spin_unlock(&sb->s_inode_list_lock);
712 		}
713 	}
714 	/*
715 	 * Broadcast to everyone that grabbed a temporary reference to this
716 	 * superblock before we removed it from @fs_supers that the superblock
717 	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
718 	 * discard this superblock and treat it as dead.
719 	 *
720 	 * We leave the superblock on @fs_supers so it can be found by
721 	 * sget{_fc}() until we have passed ->kill_sb().
722 	 */
723 	super_wake(sb, SB_DYING);
724 	super_unlock_excl(sb);
725 	if (sb->s_bdi != &noop_backing_dev_info) {
726 		if (sb->s_iflags & SB_I_PERSB_BDI)
727 			bdi_unregister(sb->s_bdi);
728 		bdi_put(sb->s_bdi);
729 		sb->s_bdi = &noop_backing_dev_info;
730 	}
731 }
732 
733 EXPORT_SYMBOL(generic_shutdown_super);
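
/*
 * Illustrative ->kill_sb() shape following the pattern described above
 * ("foofs" and its info structure are hypothetical):
 *
 *	static void foofs_kill_sb(struct super_block *sb)
 *	{
 *		struct foofs_fs_info *fsi = sb->s_fs_info;
 *
 *		generic_shutdown_super(sb);
 *		kfree(fsi);
 *	}
 */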
734 
735 bool mount_capable(struct fs_context *fc)
736 {
737 	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
738 		return capable(CAP_SYS_ADMIN);
739 	else
740 		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
741 }
742 
743 /**
744  * sget_fc - Find or create a superblock
745  * @fc:	Filesystem context.
746  * @test: Comparison callback
747  * @set: Setup callback
748  *
749  * Create a new superblock or find an existing one.
750  *
751  * The @test callback is used to find a matching existing superblock.
752  * Whether or not the requested parameters in @fc are taken into account
753  * is specific to the @test callback that is used. They may even be
754  * completely ignored.
755  *
756  * If an extant superblock is matched, it will be returned unless:
757  *
758  * (1) the namespace the filesystem context @fc and the extant
759  *     superblock's namespace differ
760  *
761  * (2) the filesystem context @fc has requested that reusing an extant
762  *     superblock is not allowed
763  *
764  * In both cases EBUSY will be returned.
765  *
766  * If no match is made, a new superblock will be allocated and basic
767  * initialisation will be performed (s_type, s_fs_info and s_id will be
768  * set and the @set callback will be invoked), the superblock will be
769  * published and it will be returned in a partially constructed state
770  * with SB_BORN and SB_ACTIVE as yet unset.
771  *
772  * Return: On success, an extant or newly created superblock is
773  *         returned. On failure an error pointer is returned.
774  */
775 struct super_block *sget_fc(struct fs_context *fc,
776 			    int (*test)(struct super_block *, struct fs_context *),
777 			    int (*set)(struct super_block *, struct fs_context *))
778 {
779 	struct super_block *s = NULL;
780 	struct super_block *old;
781 	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
782 	int err;
783 
784 retry:
785 	spin_lock(&sb_lock);
786 	if (test) {
787 		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
788 			if (test(old, fc))
789 				goto share_extant_sb;
790 		}
791 	}
792 	if (!s) {
793 		spin_unlock(&sb_lock);
794 		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
795 		if (!s)
796 			return ERR_PTR(-ENOMEM);
797 		goto retry;
798 	}
799 
800 	s->s_fs_info = fc->s_fs_info;
801 	err = set(s, fc);
802 	if (err) {
803 		s->s_fs_info = NULL;
804 		spin_unlock(&sb_lock);
805 		destroy_unused_super(s);
806 		return ERR_PTR(err);
807 	}
808 	fc->s_fs_info = NULL;
809 	s->s_type = fc->fs_type;
810 	s->s_iflags |= fc->s_iflags;
811 	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
812 	/*
813 	 * Make the superblock visible on @super_blocks and @fs_supers.
814 	 * It's in a nascent state and users should wait on SB_BORN or
815 	 * SB_DYING to be set.
816 	 */
817 	list_add_tail(&s->s_list, &super_blocks);
818 	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
819 	spin_unlock(&sb_lock);
820 	get_filesystem(s->s_type);
821 	register_shrinker_prepared(&s->s_shrink);
822 	return s;
823 
824 share_extant_sb:
825 	if (user_ns != old->s_user_ns || fc->exclusive) {
826 		spin_unlock(&sb_lock);
827 		destroy_unused_super(s);
828 		if (fc->exclusive)
829 			warnfc(fc, "reusing existing filesystem not allowed");
830 		else
831 			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
832 		return ERR_PTR(-EBUSY);
833 	}
834 	if (!grab_super_dead(old))
835 		goto retry;
836 	destroy_unused_super(s);
837 	return old;
838 }
839 EXPORT_SYMBOL(sget_fc);
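
/*
 * For a concrete @test/@set pair to use with sget_fc(), see
 * super_s_dev_test() and super_s_dev_set() below, which sget_dev()
 * passes in to match superblocks by device number.
 */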
840 
841 /**
842  *	sget	-	find or create a superblock
843  *	@type:	  filesystem type superblock should belong to
844  *	@test:	  comparison callback
845  *	@set:	  setup callback
846  *	@flags:	  mount flags
847  *	@data:	  argument to each of them
848  */
849 struct super_block *sget(struct file_system_type *type,
850 			int (*test)(struct super_block *,void *),
851 			int (*set)(struct super_block *,void *),
852 			int flags,
853 			void *data)
854 {
855 	struct user_namespace *user_ns = current_user_ns();
856 	struct super_block *s = NULL;
857 	struct super_block *old;
858 	int err;
859 
860 	/* We don't yet pass the user namespace of the parent
861 	 * mount through to here so always use &init_user_ns
862 	 * until that changes.
863 	 */
864 	if (flags & SB_SUBMOUNT)
865 		user_ns = &init_user_ns;
866 
867 retry:
868 	spin_lock(&sb_lock);
869 	if (test) {
870 		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
871 			if (!test(old, data))
872 				continue;
873 			if (user_ns != old->s_user_ns) {
874 				spin_unlock(&sb_lock);
875 				destroy_unused_super(s);
876 				return ERR_PTR(-EBUSY);
877 			}
878 			if (!grab_super_dead(old))
879 				goto retry;
880 			destroy_unused_super(s);
881 			return old;
882 		}
883 	}
884 	if (!s) {
885 		spin_unlock(&sb_lock);
886 		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
887 		if (!s)
888 			return ERR_PTR(-ENOMEM);
889 		goto retry;
890 	}
891 
892 	err = set(s, data);
893 	if (err) {
894 		spin_unlock(&sb_lock);
895 		destroy_unused_super(s);
896 		return ERR_PTR(err);
897 	}
898 	s->s_type = type;
899 	strscpy(s->s_id, type->name, sizeof(s->s_id));
900 	list_add_tail(&s->s_list, &super_blocks);
901 	hlist_add_head(&s->s_instances, &type->fs_supers);
902 	spin_unlock(&sb_lock);
903 	get_filesystem(type);
904 	register_shrinker_prepared(&s->s_shrink);
905 	return s;
906 }
907 EXPORT_SYMBOL(sget);
908 
909 void drop_super(struct super_block *sb)
910 {
911 	super_unlock_shared(sb);
912 	put_super(sb);
913 }
914 
915 EXPORT_SYMBOL(drop_super);
916 
917 void drop_super_exclusive(struct super_block *sb)
918 {
919 	super_unlock_excl(sb);
920 	put_super(sb);
921 }
922 EXPORT_SYMBOL(drop_super_exclusive);
923 
924 static void __iterate_supers(void (*f)(struct super_block *))
925 {
926 	struct super_block *sb, *p = NULL;
927 
928 	spin_lock(&sb_lock);
929 	list_for_each_entry(sb, &super_blocks, s_list) {
930 		/* Pairs with the memory barrier in super_wake(). */
931 		if (smp_load_acquire(&sb->s_flags) & SB_DYING)
932 			continue;
933 		sb->s_count++;
934 		spin_unlock(&sb_lock);
935 
936 		f(sb);
937 
938 		spin_lock(&sb_lock);
939 		if (p)
940 			__put_super(p);
941 		p = sb;
942 	}
943 	if (p)
944 		__put_super(p);
945 	spin_unlock(&sb_lock);
946 }
947 /**
948  *	iterate_supers - call function for all active superblocks
949  *	@f: function to call
950  *	@arg: argument to pass to it
951  *
952  *	Scans the superblock list and calls given function, passing it
953  *	locked superblock and given argument.
954  */
955 void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
956 {
957 	struct super_block *sb, *p = NULL;
958 
959 	spin_lock(&sb_lock);
960 	list_for_each_entry(sb, &super_blocks, s_list) {
961 		bool born;
962 
963 		sb->s_count++;
964 		spin_unlock(&sb_lock);
965 
966 		born = super_lock_shared(sb);
967 		if (born && sb->s_root)
968 			f(sb, arg);
969 		super_unlock_shared(sb);
970 
971 		spin_lock(&sb_lock);
972 		if (p)
973 			__put_super(p);
974 		p = sb;
975 	}
976 	if (p)
977 		__put_super(p);
978 	spin_unlock(&sb_lock);
979 }
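
/*
 * A typical caller is the sync(2) path in fs/sync.c, which walks every
 * superblock roughly like (helper names as in that file):
 *
 *	iterate_supers(sync_inodes_one_sb, NULL);
 *	iterate_supers(sync_fs_one_sb, &nowait);
 */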
980 
981 /**
982  *	iterate_supers_type - call function for superblocks of given type
983  *	@type: fs type
984  *	@f: function to call
985  *	@arg: argument to pass to it
986  *
987  *	Scans the superblock list and calls given function, passing it
988  *	locked superblock and given argument.
989  */
990 void iterate_supers_type(struct file_system_type *type,
991 	void (*f)(struct super_block *, void *), void *arg)
992 {
993 	struct super_block *sb, *p = NULL;
994 
995 	spin_lock(&sb_lock);
996 	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
997 		bool born;
998 
999 		sb->s_count++;
1000 		spin_unlock(&sb_lock);
1001 
1002 		born = super_lock_shared(sb);
1003 		if (born && sb->s_root)
1004 			f(sb, arg);
1005 		super_unlock_shared(sb);
1006 
1007 		spin_lock(&sb_lock);
1008 		if (p)
1009 			__put_super(p);
1010 		p = sb;
1011 	}
1012 	if (p)
1013 		__put_super(p);
1014 	spin_unlock(&sb_lock);
1015 }
1016 
1017 EXPORT_SYMBOL(iterate_supers_type);
1018 
1019 /**
1020  * get_active_super - get an active reference to the superblock of a device
1021  * @bdev: device to get the superblock for
1022  *
1023  * Scans the superblock list and finds the superblock of the file system
1024  * mounted on the device given.  Returns the superblock with an active
1025  * reference or %NULL if none was found.
1026  */
1027 struct super_block *get_active_super(struct block_device *bdev)
1028 {
1029 	struct super_block *sb;
1030 
1031 	if (!bdev)
1032 		return NULL;
1033 
1034 	spin_lock(&sb_lock);
1035 	list_for_each_entry(sb, &super_blocks, s_list) {
1036 		if (sb->s_bdev == bdev) {
1037 			if (!grab_super(sb))
1038 				return NULL;
1039 			super_unlock_excl(sb);
1040 			return sb;
1041 		}
1042 	}
1043 	spin_unlock(&sb_lock);
1044 	return NULL;
1045 }
1046 
1047 struct super_block *user_get_super(dev_t dev, bool excl)
1048 {
1049 	struct super_block *sb;
1050 
1051 	spin_lock(&sb_lock);
1052 	list_for_each_entry(sb, &super_blocks, s_list) {
1053 		if (sb->s_dev ==  dev) {
1054 			bool born;
1055 
1056 			sb->s_count++;
1057 			spin_unlock(&sb_lock);
1058 			/* still alive? */
1059 			born = super_lock(sb, excl);
1060 			if (born && sb->s_root)
1061 				return sb;
1062 			super_unlock(sb, excl);
1063 			/* nope, got unmounted */
1064 			spin_lock(&sb_lock);
1065 			__put_super(sb);
1066 			break;
1067 		}
1068 	}
1069 	spin_unlock(&sb_lock);
1070 	return NULL;
1071 }
1072 
1073 /**
1074  * reconfigure_super - asks filesystem to change superblock parameters
1075  * @fc: The superblock and configuration
1076  *
1077  * Alters the configuration parameters of a live superblock.
1078  */
1079 int reconfigure_super(struct fs_context *fc)
1080 {
1081 	struct super_block *sb = fc->root->d_sb;
1082 	int retval;
1083 	bool remount_ro = false;
1084 	bool remount_rw = false;
1085 	bool force = fc->sb_flags & SB_FORCE;
1086 
1087 	if (fc->sb_flags_mask & ~MS_RMT_MASK)
1088 		return -EINVAL;
1089 	if (sb->s_writers.frozen != SB_UNFROZEN)
1090 		return -EBUSY;
1091 
1092 	retval = security_sb_remount(sb, fc->security);
1093 	if (retval)
1094 		return retval;
1095 
1096 	if (fc->sb_flags_mask & SB_RDONLY) {
1097 #ifdef CONFIG_BLOCK
1098 		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
1099 		    bdev_read_only(sb->s_bdev))
1100 			return -EACCES;
1101 #endif
1102 		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
1103 		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
1104 	}
1105 
1106 	if (remount_ro) {
1107 		if (!hlist_empty(&sb->s_pins)) {
1108 			super_unlock_excl(sb);
1109 			group_pin_kill(&sb->s_pins);
1110 			__super_lock_excl(sb);
1111 			if (!sb->s_root)
1112 				return 0;
1113 			if (sb->s_writers.frozen != SB_UNFROZEN)
1114 				return -EBUSY;
1115 			remount_ro = !sb_rdonly(sb);
1116 		}
1117 	}
1118 	shrink_dcache_sb(sb);
1119 
1120 	/* If we are reconfiguring to RDONLY and current sb is read/write,
1121 	 * make sure there are no files open for writing.
1122 	 */
1123 	if (remount_ro) {
1124 		if (force) {
1125 			sb_start_ro_state_change(sb);
1126 		} else {
1127 			retval = sb_prepare_remount_readonly(sb);
1128 			if (retval)
1129 				return retval;
1130 		}
1131 	} else if (remount_rw) {
1132 		/*
1133 		 * Protect filesystem's reconfigure code from writes from
1134 		 * userspace until reconfigure finishes.
1135 		 */
1136 		sb_start_ro_state_change(sb);
1137 	}
1138 
1139 	if (fc->ops->reconfigure) {
1140 		retval = fc->ops->reconfigure(fc);
1141 		if (retval) {
1142 			if (!force)
1143 				goto cancel_readonly;
1144 			/* If forced remount, go ahead despite any errors */
1145 			WARN(1, "forced remount of a %s fs returned %i\n",
1146 			     sb->s_type->name, retval);
1147 		}
1148 	}
1149 
1150 	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
1151 				 (fc->sb_flags & fc->sb_flags_mask)));
1152 	sb_end_ro_state_change(sb);
1153 
1154 	/*
1155 	 * Some filesystems modify their metadata via some other path than the
1156 	 * bdev buffer cache (e.g. use a private mapping, or directories in
1157 	 * pagecache, etc). Also file data modifications go via their own
1158 	 * mappings. So if we try to remount read-only and then copy the
1159 	 * filesystem from the bdev, we could get stale data, so invalidate it
1160 	 * to give a best effort at coherency.
1161 	 */
1162 	if (remount_ro && sb->s_bdev)
1163 		invalidate_bdev(sb->s_bdev);
1164 	return 0;
1165 
1166 cancel_readonly:
1167 	sb_end_ro_state_change(sb);
1168 	return retval;
1169 }
1170 
1171 static void do_emergency_remount_callback(struct super_block *sb)
1172 {
1173 	bool born = super_lock_excl(sb);
1174 
1175 	if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
1176 		struct fs_context *fc;
1177 
1178 		fc = fs_context_for_reconfigure(sb->s_root,
1179 					SB_RDONLY | SB_FORCE, SB_RDONLY);
1180 		if (!IS_ERR(fc)) {
1181 			if (parse_monolithic_mount_data(fc, NULL) == 0)
1182 				(void)reconfigure_super(fc);
1183 			put_fs_context(fc);
1184 		}
1185 	}
1186 	super_unlock_excl(sb);
1187 }
1188 
1189 static void do_emergency_remount(struct work_struct *work)
1190 {
1191 	__iterate_supers(do_emergency_remount_callback);
1192 	kfree(work);
1193 	printk("Emergency Remount complete\n");
1194 }
1195 
1196 void emergency_remount(void)
1197 {
1198 	struct work_struct *work;
1199 
1200 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
1201 	if (work) {
1202 		INIT_WORK(work, do_emergency_remount);
1203 		schedule_work(work);
1204 	}
1205 }
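
/*
 * emergency_remount() can be invoked from atomic context (it is wired
 * up to the SysRq 'u' handler), hence the GFP_ATOMIC allocation and
 * deferring the actual remount to a workqueue.
 */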
1206 
1207 static void do_thaw_all_callback(struct super_block *sb)
1208 {
1209 	bool born = super_lock_excl(sb);
1210 
1211 	if (born && sb->s_root) {
1212 		if (IS_ENABLED(CONFIG_BLOCK))
1213 			while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
1214 				pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
1215 		thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
1216 	} else {
1217 		super_unlock_excl(sb);
1218 	}
1219 }
1220 
1221 static void do_thaw_all(struct work_struct *work)
1222 {
1223 	__iterate_supers(do_thaw_all_callback);
1224 	kfree(work);
1225 	printk(KERN_WARNING "Emergency Thaw complete\n");
1226 }
1227 
1228 /**
1229  * emergency_thaw_all -- forcibly thaw every frozen filesystem
1230  *
1231  * Used for emergency unfreeze of all filesystems via SysRq
1232  */
1233 void emergency_thaw_all(void)
1234 {
1235 	struct work_struct *work;
1236 
1237 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
1238 	if (work) {
1239 		INIT_WORK(work, do_thaw_all);
1240 		schedule_work(work);
1241 	}
1242 }
1243 
1244 static DEFINE_IDA(unnamed_dev_ida);
1245 
1246 /**
1247  * get_anon_bdev - Allocate a block device for filesystems which don't have one.
1248  * @p: Pointer to a dev_t.
1249  *
1250  * Filesystems which don't use real block devices can call this function
1251  * to allocate a virtual block device.
1252  *
1253  * Context: Any context.  Frequently called while holding sb_lock.
1254  * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
1255  * or -ENOMEM if memory allocation failed.
1256  */
1257 int get_anon_bdev(dev_t *p)
1258 {
1259 	int dev;
1260 
1261 	/*
1262 	 * Many userspace utilities consider an FSID of 0 invalid.
1263 	 * Always return at least 1 from get_anon_bdev.
1264 	 */
1265 	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
1266 			GFP_ATOMIC);
1267 	if (dev == -ENOSPC)
1268 		dev = -EMFILE;
1269 	if (dev < 0)
1270 		return dev;
1271 
1272 	*p = MKDEV(0, dev);
1273 	return 0;
1274 }
1275 EXPORT_SYMBOL(get_anon_bdev);
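
/*
 * The resulting dev_t has major 0, which real block devices never use;
 * such filesystems show up with st_dev values like "0:42" in
 * /proc/self/mountinfo.
 */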
1276 
1277 void free_anon_bdev(dev_t dev)
1278 {
1279 	ida_free(&unnamed_dev_ida, MINOR(dev));
1280 }
1281 EXPORT_SYMBOL(free_anon_bdev);
1282 
1283 int set_anon_super(struct super_block *s, void *data)
1284 {
1285 	return get_anon_bdev(&s->s_dev);
1286 }
1287 EXPORT_SYMBOL(set_anon_super);
1288 
1289 void kill_anon_super(struct super_block *sb)
1290 {
1291 	dev_t dev = sb->s_dev;
1292 	generic_shutdown_super(sb);
1293 	kill_super_notify(sb);
1294 	free_anon_bdev(dev);
1295 }
1296 EXPORT_SYMBOL(kill_anon_super);
1297 
1298 void kill_litter_super(struct super_block *sb)
1299 {
1300 	if (sb->s_root)
1301 		d_genocide(sb->s_root);
1302 	kill_anon_super(sb);
1303 }
1304 EXPORT_SYMBOL(kill_litter_super);
1305 
1306 int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
1307 {
1308 	return set_anon_super(sb, NULL);
1309 }
1310 EXPORT_SYMBOL(set_anon_super_fc);
1311 
1312 static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
1313 {
1314 	return sb->s_fs_info == fc->s_fs_info;
1315 }
1316 
1317 static int test_single_super(struct super_block *s, struct fs_context *fc)
1318 {
1319 	return 1;
1320 }
1321 
1322 static int vfs_get_super(struct fs_context *fc,
1323 		int (*test)(struct super_block *, struct fs_context *),
1324 		int (*fill_super)(struct super_block *sb,
1325 				  struct fs_context *fc))
1326 {
1327 	struct super_block *sb;
1328 	int err;
1329 
1330 	sb = sget_fc(fc, test, set_anon_super_fc);
1331 	if (IS_ERR(sb))
1332 		return PTR_ERR(sb);
1333 
1334 	if (!sb->s_root) {
1335 		err = fill_super(sb, fc);
1336 		if (err)
1337 			goto error;
1338 
1339 		sb->s_flags |= SB_ACTIVE;
1340 	}
1341 
1342 	fc->root = dget(sb->s_root);
1343 	return 0;
1344 
1345 error:
1346 	deactivate_locked_super(sb);
1347 	return err;
1348 }
1349 
1350 int get_tree_nodev(struct fs_context *fc,
1351 		  int (*fill_super)(struct super_block *sb,
1352 				    struct fs_context *fc))
1353 {
1354 	return vfs_get_super(fc, NULL, fill_super);
1355 }
1356 EXPORT_SYMBOL(get_tree_nodev);
1357 
1358 int get_tree_single(struct fs_context *fc,
1359 		  int (*fill_super)(struct super_block *sb,
1360 				    struct fs_context *fc))
1361 {
1362 	return vfs_get_super(fc, test_single_super, fill_super);
1363 }
1364 EXPORT_SYMBOL(get_tree_single);
1365 
1366 int get_tree_keyed(struct fs_context *fc,
1367 		  int (*fill_super)(struct super_block *sb,
1368 				    struct fs_context *fc),
1369 		void *key)
1370 {
1371 	fc->s_fs_info = key;
1372 	return vfs_get_super(fc, test_keyed_super, fill_super);
1373 }
1374 EXPORT_SYMBOL(get_tree_keyed);
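
/*
 * get_tree_keyed() suits namespace-scoped pseudo filesystems; mqueue,
 * for example, passes its ipc namespace as @key so that each namespace
 * gets its own superblock.
 */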
1375 
1376 static int set_bdev_super(struct super_block *s, void *data)
1377 {
1378 	s->s_dev = *(dev_t *)data;
1379 	return 0;
1380 }
1381 
1382 static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
1383 {
1384 	return set_bdev_super(s, fc->sget_key);
1385 }
1386 
1387 static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
1388 {
1389 	return !(s->s_iflags & SB_I_RETIRED) &&
1390 		s->s_dev == *(dev_t *)fc->sget_key;
1391 }
1392 
1393 /**
1394  * sget_dev - Find or create a superblock by device number
1395  * @fc: Filesystem context.
1396  * @dev: device number
1397  *
1398  * Find or create a superblock using the provided device number that
1399  * will be stored in fc->sget_key.
1400  *
1401  * If an extant superblock is matched, then that will be returned with
1402  * an elevated reference count that the caller must transfer or discard.
1403  *
1404  * If no match is made, a new superblock will be allocated and basic
1405  * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
1406  * be set). The superblock will be published and it will be returned in
1407  * a partially constructed state with SB_BORN and SB_ACTIVE as yet
1408  * unset.
1409  *
1410  * Return: an existing or newly created superblock on success, an error
1411  *         pointer on failure.
1412  */
1413 struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
1414 {
1415 	fc->sget_key = &dev;
1416 	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
1417 }
1418 EXPORT_SYMBOL(sget_dev);
1419 
1420 #ifdef CONFIG_BLOCK
1421 /*
1422  * Lock a super block that the caller holds a reference to.
1423  *
1424  * The caller needs to ensure that the super_block isn't being freed while
1425  * calling this function, e.g. by holding a lock over the call to this function
1426  * and the place that clears the pointer to the superblock used by this function
1427  * before freeing the superblock.
1428  */
1429 static bool super_lock_shared_active(struct super_block *sb)
1430 {
1431 	bool born = super_lock_shared(sb);
1432 
1433 	if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
1434 		super_unlock_shared(sb);
1435 		return false;
1436 	}
1437 	return true;
1438 }
1439 
1440 static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
1441 {
1442 	struct super_block *sb = bdev->bd_holder;
1443 
1444 	/* bd_holder_lock ensures that the sb isn't freed */
1445 	lockdep_assert_held(&bdev->bd_holder_lock);
1446 
1447 	if (!super_lock_shared_active(sb))
1448 		return;
1449 
1450 	if (!surprise)
1451 		sync_filesystem(sb);
1452 	shrink_dcache_sb(sb);
1453 	invalidate_inodes(sb);
1454 	if (sb->s_op->shutdown)
1455 		sb->s_op->shutdown(sb);
1456 
1457 	super_unlock_shared(sb);
1458 }
1459 
1460 static void fs_bdev_sync(struct block_device *bdev)
1461 {
1462 	struct super_block *sb = bdev->bd_holder;
1463 
1464 	lockdep_assert_held(&bdev->bd_holder_lock);
1465 
1466 	if (!super_lock_shared_active(sb))
1467 		return;
1468 	sync_filesystem(sb);
1469 	super_unlock_shared(sb);
1470 }
1471 
1472 const struct blk_holder_ops fs_holder_ops = {
1473 	.mark_dead		= fs_bdev_mark_dead,
1474 	.sync			= fs_bdev_sync,
1475 };
1476 EXPORT_SYMBOL_GPL(fs_holder_ops);
1477 
1478 int setup_bdev_super(struct super_block *sb, int sb_flags,
1479 		struct fs_context *fc)
1480 {
1481 	blk_mode_t mode = sb_open_mode(sb_flags);
1482 	struct block_device *bdev;
1483 
1484 	bdev = blkdev_get_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
1485 	if (IS_ERR(bdev)) {
1486 		if (fc)
1487 			errorf(fc, "%s: Can't open blockdev", fc->source);
1488 		return PTR_ERR(bdev);
1489 	}
1490 
1491 	/*
1492 	 * This really should be in blkdev_get_by_dev, but right now can't due
1493 	 * to legacy issues that require us to allow opening a block device node
1494 	 * writable from userspace even for a read-only block device.
1495 	 */
1496 	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
1497 		blkdev_put(bdev, sb);
1498 		return -EACCES;
1499 	}
1500 
1501 	/*
1502 	 * Until SB_BORN flag is set, there can be no active superblock
1503 	 * references and thus no filesystem freezing. get_active_super() will
1504 	 * just loop waiting for SB_BORN so even freeze_bdev() cannot proceed.
1505 	 *
1506 	 * It is enough to check bdev was not frozen before we set s_bdev.
1507 	 */
1508 	mutex_lock(&bdev->bd_fsfreeze_mutex);
1509 	if (bdev->bd_fsfreeze_count > 0) {
1510 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
1511 		if (fc)
1512 			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
1513 		blkdev_put(bdev, sb);
1514 		return -EBUSY;
1515 	}
1516 	spin_lock(&sb_lock);
1517 	sb->s_bdev = bdev;
1518 	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
1519 	if (bdev_stable_writes(bdev))
1520 		sb->s_iflags |= SB_I_STABLE_WRITES;
1521 	spin_unlock(&sb_lock);
1522 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
1523 
1524 	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
1525 	shrinker_debugfs_rename(&sb->s_shrink, "sb-%s:%s", sb->s_type->name,
1526 				sb->s_id);
1527 	sb_set_blocksize(sb, block_size(bdev));
1528 	return 0;
1529 }
1530 EXPORT_SYMBOL_GPL(setup_bdev_super);
1531 
1532 /**
1533  * get_tree_bdev - Get a superblock based on a single block device
1534  * @fc: The filesystem context holding the parameters
1535  * @fill_super: Helper to initialise a new superblock
1536  */
1537 int get_tree_bdev(struct fs_context *fc,
1538 		int (*fill_super)(struct super_block *,
1539 				  struct fs_context *))
1540 {
1541 	struct super_block *s;
1542 	int error = 0;
1543 	dev_t dev;
1544 
1545 	if (!fc->source)
1546 		return invalf(fc, "No source specified");
1547 
1548 	error = lookup_bdev(fc->source, &dev);
1549 	if (error) {
1550 		errorf(fc, "%s: Can't lookup blockdev", fc->source);
1551 		return error;
1552 	}
1553 
1554 	fc->sb_flags |= SB_NOSEC;
1555 	s = sget_dev(fc, dev);
1556 	if (IS_ERR(s))
1557 		return PTR_ERR(s);
1558 
1559 	if (s->s_root) {
1560 		/* Don't summarily change the RO/RW state. */
1561 		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
1562 			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
1563 			deactivate_locked_super(s);
1564 			return -EBUSY;
1565 		}
1566 	} else {
1567 		/*
1568 		 * We drop s_umount here because we need to open the bdev and
1569 		 * bdev->open_mutex ranks above s_umount (blkdev_put() ->
1570 		 * bdev_mark_dead()). It is safe because we hold an active sb
1571 		 * reference and SB_BORN is not set yet.
1572 		 */
1573 		super_unlock_excl(s);
1574 		error = setup_bdev_super(s, fc->sb_flags, fc);
1575 		__super_lock_excl(s);
1576 		if (!error)
1577 			error = fill_super(s, fc);
1578 		if (error) {
1579 			deactivate_locked_super(s);
1580 			return error;
1581 		}
1582 		s->s_flags |= SB_ACTIVE;
1583 	}
1584 
1585 	BUG_ON(fc->root);
1586 	fc->root = dget(s->s_root);
1587 	return 0;
1588 }
1589 EXPORT_SYMBOL(get_tree_bdev);
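
/*
 * Illustrative use from a block-based filesystem's ->get_tree()
 * ("foofs" is hypothetical):
 *
 *	static int foofs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, foofs_fill_super);
 *	}
 */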
1590 
1591 static int test_bdev_super(struct super_block *s, void *data)
1592 {
1593 	return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
1594 }
1595 
1596 struct dentry *mount_bdev(struct file_system_type *fs_type,
1597 	int flags, const char *dev_name, void *data,
1598 	int (*fill_super)(struct super_block *, void *, int))
1599 {
1600 	struct super_block *s;
1601 	int error;
1602 	dev_t dev;
1603 
1604 	error = lookup_bdev(dev_name, &dev);
1605 	if (error)
1606 		return ERR_PTR(error);
1607 
1608 	flags |= SB_NOSEC;
1609 	s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
1610 	if (IS_ERR(s))
1611 		return ERR_CAST(s);
1612 
1613 	if (s->s_root) {
1614 		if ((flags ^ s->s_flags) & SB_RDONLY) {
1615 			deactivate_locked_super(s);
1616 			return ERR_PTR(-EBUSY);
1617 		}
1618 	} else {
1619 		/*
1620 		 * We drop s_umount here because we need to open the bdev and
1621 		 * bdev->open_mutex ranks above s_umount (blkdev_put() ->
1622 		 * bdev_mark_dead()). It is safe because we hold an active sb
1623 		 * reference and SB_BORN is not set yet.
1624 		 */
1625 		super_unlock_excl(s);
1626 		error = setup_bdev_super(s, flags, NULL);
1627 		__super_lock_excl(s);
1628 		if (!error)
1629 			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1630 		if (error) {
1631 			deactivate_locked_super(s);
1632 			return ERR_PTR(error);
1633 		}
1634 
1635 		s->s_flags |= SB_ACTIVE;
1636 	}
1637 
1638 	return dget(s->s_root);
1639 }
1640 EXPORT_SYMBOL(mount_bdev);
1641 
1642 void kill_block_super(struct super_block *sb)
1643 {
1644 	struct block_device *bdev = sb->s_bdev;
1645 
1646 	generic_shutdown_super(sb);
1647 	if (bdev) {
1648 		sync_blockdev(bdev);
1649 		blkdev_put(bdev, sb);
1650 	}
1651 }
1652 
1653 EXPORT_SYMBOL(kill_block_super);
1654 #endif
1655 
1656 struct dentry *mount_nodev(struct file_system_type *fs_type,
1657 	int flags, void *data,
1658 	int (*fill_super)(struct super_block *, void *, int))
1659 {
1660 	int error;
1661 	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1662 
1663 	if (IS_ERR(s))
1664 		return ERR_CAST(s);
1665 
1666 	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1667 	if (error) {
1668 		deactivate_locked_super(s);
1669 		return ERR_PTR(error);
1670 	}
1671 	s->s_flags |= SB_ACTIVE;
1672 	return dget(s->s_root);
1673 }
1674 EXPORT_SYMBOL(mount_nodev);
1675 
1676 int reconfigure_single(struct super_block *s,
1677 		       int flags, void *data)
1678 {
1679 	struct fs_context *fc;
1680 	int ret;
1681 
1682 	/* The caller really needs to be passing fc down into mount_single(),
1683 	 * then a chunk of this can be removed.  [Bollocks -- AV]
1684 	 * Better yet, reconfiguration shouldn't happen, but rather the second
1685 	 * mount should be rejected if the parameters are not compatible.
1686 	 */
1687 	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
1688 	if (IS_ERR(fc))
1689 		return PTR_ERR(fc);
1690 
1691 	ret = parse_monolithic_mount_data(fc, data);
1692 	if (ret < 0)
1693 		goto out;
1694 
1695 	ret = reconfigure_super(fc);
1696 out:
1697 	put_fs_context(fc);
1698 	return ret;
1699 }
1700 
1701 static int compare_single(struct super_block *s, void *p)
1702 {
1703 	return 1;
1704 }
1705 
1706 struct dentry *mount_single(struct file_system_type *fs_type,
1707 	int flags, void *data,
1708 	int (*fill_super)(struct super_block *, void *, int))
1709 {
1710 	struct super_block *s;
1711 	int error;
1712 
1713 	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
1714 	if (IS_ERR(s))
1715 		return ERR_CAST(s);
1716 	if (!s->s_root) {
1717 		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1718 		if (!error)
1719 			s->s_flags |= SB_ACTIVE;
1720 	} else {
1721 		error = reconfigure_single(s, flags, data);
1722 	}
1723 	if (unlikely(error)) {
1724 		deactivate_locked_super(s);
1725 		return ERR_PTR(error);
1726 	}
1727 	return dget(s->s_root);
1728 }
1729 EXPORT_SYMBOL(mount_single);
1730 
1731 /**
1732  * vfs_get_tree - Get the mountable root
1733  * @fc: The superblock configuration context.
1734  *
1735  * The filesystem is invoked to get or create a superblock which can then later
1736  * be used for mounting.  The filesystem places a pointer to the root to be
1737  * used for mounting in @fc->root.
1738  */
1739 int vfs_get_tree(struct fs_context *fc)
1740 {
1741 	struct super_block *sb;
1742 	int error;
1743 
1744 	if (fc->root)
1745 		return -EBUSY;
1746 
1747 	/* Get the mountable root in fc->root, with a ref on the root and a ref
1748 	 * on the superblock.
1749 	 */
1750 	error = fc->ops->get_tree(fc);
1751 	if (error < 0)
1752 		return error;
1753 
1754 	if (!fc->root) {
1755 		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
1756 		       fc->fs_type->name);
1757 		/* We don't know what the locking state of the superblock is -
1758 		 * if there is a superblock.
1759 		 */
1760 		BUG();
1761 	}
1762 
1763 	sb = fc->root->d_sb;
1764 	WARN_ON(!sb->s_bdi);
1765 
1766 	/*
1767 	 * super_wake() contains a memory barrier which also takes care of
1768 	 * ordering for super_cache_count(). We place it before setting
1769 	 * SB_BORN as the data dependency between the two functions is
1770 	 * the superblock structure contents that we just set up, not
1771 	 * the SB_BORN flag.
1772 	 */
1773 	super_wake(sb, SB_BORN);
1774 
1775 	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
1776 	if (unlikely(error)) {
1777 		fc_drop_locked(fc);
1778 		return error;
1779 	}
1780 
1781 	/*
1782 	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1783 	 * but s_maxbytes was an unsigned long long for many releases. Throw
1784 	 * this warning for a little while to try and catch filesystems that
1785 	 * violate this rule.
1786 	 */
1787 	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1788 		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
1789 
1790 	return 0;
1791 }
1792 EXPORT_SYMBOL(vfs_get_tree);
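
/*
 * vfs_get_tree() runs once a filesystem context is fully configured,
 * e.g. from do_new_mount() for mount(2) and from the
 * FSCONFIG_CMD_CREATE handling of fsconfig(2).
 */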
1793 
1794 /*
1795  * Setup private BDI for given superblock. It gets automatically cleaned up
1796  * in generic_shutdown_super().
1797  */
1798 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1799 {
1800 	struct backing_dev_info *bdi;
1801 	int err;
1802 	va_list args;
1803 
1804 	bdi = bdi_alloc(NUMA_NO_NODE);
1805 	if (!bdi)
1806 		return -ENOMEM;
1807 
1808 	va_start(args, fmt);
1809 	err = bdi_register_va(bdi, fmt, args);
1810 	va_end(args);
1811 	if (err) {
1812 		bdi_put(bdi);
1813 		return err;
1814 	}
1815 	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1816 	sb->s_bdi = bdi;
1817 	sb->s_iflags |= SB_I_PERSB_BDI;
1818 
1819 	return 0;
1820 }
1821 EXPORT_SYMBOL(super_setup_bdi_name);
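
/*
 * Sketch of use from a fill_super implementation ("foofs" and the id
 * are hypothetical):
 *
 *	err = super_setup_bdi_name(sb, "foofs-%u", conn_id);
 *	if (err)
 *		return err;
 */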
1822 
1823 /*
1824  * Setup private BDI for given superblock. It gets automatically cleaned up
1825  * in generic_shutdown_super().
1826  */
1827 int super_setup_bdi(struct super_block *sb)
1828 {
1829 	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1830 
1831 	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1832 				    atomic_long_inc_return(&bdi_seq));
1833 }
1834 EXPORT_SYMBOL(super_setup_bdi);
1835 
1836 /**
1837  * sb_wait_write - wait until all writers to given file system finish
1838  * @sb: the super for which we wait
1839  * @level: type of writers we wait for (normal vs page fault)
1840  *
1841  * This function waits until there are no writers of the given type to the
1842  * given file system.  @level is 1-based, hence the level-1 indexing below.
1843  */
1844 static void sb_wait_write(struct super_block *sb, int level)
1845 {
1846 	percpu_down_write(sb->s_writers.rw_sem + level-1);
1847 }
1848 
1849 /*
1850  * We are going to return to userspace and forget about these locks; the
1851  * ownership goes to the caller of thaw_super(), which does the unlock.
1852  */
1853 static void lockdep_sb_freeze_release(struct super_block *sb)
1854 {
1855 	int level;
1856 
1857 	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1858 		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1859 }
1860 
1861 /*
1862  * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1863  */
1864 static void lockdep_sb_freeze_acquire(struct super_block *sb)
1865 {
1866 	int level;
1867 
1868 	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1869 		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1870 }
1871 
1872 static void sb_freeze_unlock(struct super_block *sb, int level)
1873 {
1874 	for (level--; level >= 0; level--)
1875 		percpu_up_write(sb->s_writers.rw_sem + level);
1876 }
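
/*
 * A minimal sketch of the write side these rwsems pair with.  The
 * sb_start_*()/sb_end_*() helpers are the real API from linux/fs.h;
 * example_write() is hypothetical.  Every modification path holds the read
 * side of the level it belongs to, so the percpu_down_write() in
 * sb_wait_write() blocks until all such sections drain.  Page-fault paths
 * use sb_start_pagefault() (SB_FREEZE_PAGEFAULT) and internal fs threads
 * use sb_start_intwrite() (SB_FREEZE_FS).
 */
static ssize_t example_write(struct super_block *sb)
{
	ssize_t ret;

	sb_start_write(sb);	/* read side of SB_FREEZE_WRITE */
	ret = 0;		/* ... modify the filesystem ... */
	sb_end_write(sb);

	return ret;
}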
1877 
1878 static int wait_for_partially_frozen(struct super_block *sb)
1879 {
1880 	int ret = 0;
1881 
1882 	do {
1883 		unsigned short old = sb->s_writers.frozen;
1884 
1885 		up_write(&sb->s_umount);
1886 		ret = wait_var_event_killable(&sb->s_writers.frozen,
1887 					       sb->s_writers.frozen != old);
1888 		down_write(&sb->s_umount);
1889 	} while (ret == 0 &&
1890 		 sb->s_writers.frozen != SB_UNFROZEN &&
1891 		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);
1892 
1893 	return ret;
1894 }
1895 
1896 /**
1897  * freeze_super - lock the filesystem and force it into a consistent state
1898  * @sb: the super to lock
1899  * @who: context that wants to freeze
1900  *
1901  * Syncs the super to make sure the filesystem is consistent and calls the fs's
1902  * freeze_fs.  Subsequent calls to this without first thawing the fs may return
1903  * -EBUSY.
1904  *
1905  * @who should be:
1906  * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
1907  * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
1908  *
1909  * The @who argument distinguishes between the kernel and userspace trying to
1910  * freeze the filesystem.  Although there cannot be multiple kernel freezes or
1911  * multiple userspace freezes in effect at any given time, the kernel and
1912  * userspace can both hold a filesystem frozen.  The filesystem remains frozen
1913  * until there are no kernel or userspace freezes in effect.
1914  *
1915  * During this function, sb->s_writers.frozen goes through these values:
1916  *
1917  * SB_UNFROZEN: File system is normal, all writes progress as usual.
1918  *
1919  * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
1920  * writes should be blocked, though page faults are still allowed. We wait for
1921  * all writes to complete and then proceed to the next stage.
1922  *
1923  * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
1924  * but internal fs threads can still modify the filesystem (although they
1925  * should not dirty new pages or inodes), writeback can run etc. After waiting
1926  * for all running page faults we sync the filesystem which will clean all
1927  * dirty pages and inodes (no new dirty pages or inodes can be created when
1928  * sync is running).
1929  *
1930  * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
1931  * modification are blocked (e.g. XFS preallocation truncation on inode
1932  * reclaim). This is usually implemented by blocking new transactions for
1933  * filesystems that have them and need this additional guard. After all
1934  * internal writers are finished we call ->freeze_fs() to finish filesystem
1935  * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
1936  * mostly auxiliary for filesystems to verify they do not modify frozen fs.
1937  *
1938  * sb->s_writers.frozen is protected by sb->s_umount.
1939  */
1940 int freeze_super(struct super_block *sb, enum freeze_holder who)
1941 {
1942 	int ret;
1943 
1944 	atomic_inc(&sb->s_active);
1945 	if (!super_lock_excl(sb))
1946 		WARN(1, "Dying superblock while freezing!");
1947 
1948 retry:
1949 	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
1950 		if (sb->s_writers.freeze_holders & who) {
1951 			deactivate_locked_super(sb);
1952 			return -EBUSY;
1953 		}
1954 
1955 		WARN_ON(sb->s_writers.freeze_holders == 0);
1956 
1957 		/*
1958 		 * Someone else already holds this type of freeze; share the
1959 		 * freeze and assign the active ref to the freeze.
1960 		 */
1961 		sb->s_writers.freeze_holders |= who;
1962 		super_unlock_excl(sb);
1963 		return 0;
1964 	}
1965 
1966 	if (sb->s_writers.frozen != SB_UNFROZEN) {
1967 		ret = wait_for_partially_frozen(sb);
1968 		if (ret) {
1969 			deactivate_locked_super(sb);
1970 			return ret;
1971 		}
1972 
1973 		goto retry;
1974 	}
1975 
1976 	if (!(sb->s_flags & SB_BORN)) {
1977 		super_unlock_excl(sb);
1978 		return 0;	/* sic - it's "nothing to do" */
1979 	}
1980 
1981 	if (sb_rdonly(sb)) {
1982 		/* Nothing to do really... */
1983 		sb->s_writers.freeze_holders |= who;
1984 		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1985 		wake_up_var(&sb->s_writers.frozen);
1986 		super_unlock_excl(sb);
1987 		return 0;
1988 	}
1989 
1990 	sb->s_writers.frozen = SB_FREEZE_WRITE;
1991 	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
1992 	super_unlock_excl(sb);
1993 	sb_wait_write(sb, SB_FREEZE_WRITE);
1994 	if (!super_lock_excl(sb))
1995 		WARN(1, "Dying superblock while freezing!");
1996 
1997 	/* Now we go and block page faults... */
1998 	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
1999 	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
2000 
2001 	/* All writers are done so after syncing there won't be dirty data */
2002 	ret = sync_filesystem(sb);
2003 	if (ret) {
2004 		sb->s_writers.frozen = SB_UNFROZEN;
2005 		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
2006 		wake_up_var(&sb->s_writers.frozen);
2007 		deactivate_locked_super(sb);
2008 		return ret;
2009 	}
2010 
2011 	/* Now wait for internal filesystem counter */
2012 	sb->s_writers.frozen = SB_FREEZE_FS;
2013 	sb_wait_write(sb, SB_FREEZE_FS);
2014 
2015 	if (sb->s_op->freeze_fs) {
2016 		ret = sb->s_op->freeze_fs(sb);
2017 		if (ret) {
2018 			printk(KERN_ERR
2019 				"VFS: Filesystem freeze failed\n");
2020 			sb->s_writers.frozen = SB_UNFROZEN;
2021 			sb_freeze_unlock(sb, SB_FREEZE_FS);
2022 			wake_up_var(&sb->s_writers.frozen);
2023 			deactivate_locked_super(sb);
2024 			return ret;
2025 		}
2026 	}
2027 	/*
2028 	 * For debugging purposes so that fs can warn if it sees write activity
2029 	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
2030 	 */
2031 	sb->s_writers.freeze_holders |= who;
2032 	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
2033 	wake_up_var(&sb->s_writers.frozen);
2034 	lockdep_sb_freeze_release(sb);
2035 	super_unlock_excl(sb);
2036 	return 0;
2037 }
2038 EXPORT_SYMBOL(freeze_super);
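
/*
 * A hedged usage sketch: an in-kernel user (a snapshotting driver, say)
 * freezes with FREEZE_HOLDER_KERNEL and must thaw with the same holder; a
 * concurrent userspace freeze nests independently per the rules documented
 * above.  example_snapshot() is hypothetical.
 */
static int example_snapshot(struct super_block *sb)
{
	int err;

	err = freeze_super(sb, FREEZE_HOLDER_KERNEL);
	if (err)
		return err;

	/* ... capture the now-consistent filesystem state ... */

	return thaw_super(sb, FREEZE_HOLDER_KERNEL);
}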
2039 
2040 /*
2041  * Undoes the effect of a freeze_super() call.  If the filesystem is
2042  * frozen both by userspace and the kernel, a thaw call from either source
2043  * removes that state without releasing the other state or unlocking the
2044  * filesystem.
2045  */
2046 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
2047 {
2048 	int error;
2049 
2050 	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
2051 		if (!(sb->s_writers.freeze_holders & who)) {
2052 			super_unlock_excl(sb);
2053 			return -EINVAL;
2054 		}
2055 
2056 		/*
2057 		 * Freeze is shared with someone else.  Release our hold and
2058 		 * drop the active ref that freeze_super assigned to the
2059 		 * freezer.
2060 		 */
2061 		if (sb->s_writers.freeze_holders & ~who) {
2062 			sb->s_writers.freeze_holders &= ~who;
2063 			deactivate_locked_super(sb);
2064 			return 0;
2065 		}
2066 	} else {
2067 		super_unlock_excl(sb);
2068 		return -EINVAL;
2069 	}
2070 
2071 	if (sb_rdonly(sb)) {
2072 		sb->s_writers.freeze_holders &= ~who;
2073 		sb->s_writers.frozen = SB_UNFROZEN;
2074 		wake_up_var(&sb->s_writers.frozen);
2075 		goto out;
2076 	}
2077 
2078 	lockdep_sb_freeze_acquire(sb);
2079 
2080 	if (sb->s_op->unfreeze_fs) {
2081 		error = sb->s_op->unfreeze_fs(sb);
2082 		if (error) {
2083 			printk(KERN_ERR "VFS: Filesystem thaw failed\n");
2084 			lockdep_sb_freeze_release(sb);
2085 			super_unlock_excl(sb);
2086 			return error;
2087 		}
2088 	}
2089 
2090 	sb->s_writers.freeze_holders &= ~who;
2091 	sb->s_writers.frozen = SB_UNFROZEN;
2092 	wake_up_var(&sb->s_writers.frozen);
2093 	sb_freeze_unlock(sb, SB_FREEZE_FS);
2094 out:
2095 	deactivate_locked_super(sb);
2096 	return 0;
2097 }
2098 
2099 /**
2100  * thaw_super -- unlock filesystem
2101  * @sb: the super to thaw
2102  * @who: context that wants to thaw
2103  *
2104  * Unlocks the filesystem and marks it writeable again after freeze_super()
2105  * if there are no remaining freezes on the filesystem.
2106  *
2107  * @who should be:
2108  * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
2109  * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
2110  */
2111 int thaw_super(struct super_block *sb, enum freeze_holder who)
2112 {
2113 	if (!super_lock_excl(sb))
2114 		WARN(1, "Dying superblock while thawing!");
2115 	return thaw_super_locked(sb, who);
2116 }
2117 EXPORT_SYMBOL(thaw_super);
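
/*
 * Userspace reaches freeze_super()/thaw_super() through the FIFREEZE and
 * FITHAW ioctls (fs/ioctl.c passes FREEZE_HOLDER_USERSPACE).  A minimal
 * userspace sketch, assuming the caller has CAP_SYS_ADMIN and mntpoint is
 * a mounted filesystem:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int freeze_and_thaw(const char *mntpoint)
 *	{
 *		int fd = open(mntpoint, O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, FIFREEZE, 0)) {	// -> freeze_super()
 *			close(fd);
 *			return -1;
 *		}
 *		ioctl(fd, FITHAW, 0);		// -> thaw_super()
 *		close(fd);
 *		return 0;
 *	}
 */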
2118 
2119 /*
2120  * Create a workqueue for deferred direct IO completions. We allocate the
2121  * workqueue when it's first needed. This avoids creating a workqueue for
2122  * filesystems that don't need it and also allows us to create the workqueue
2123  * late enough so that we can include s_id in the name of the workqueue.
2124  */
2125 int sb_init_dio_done_wq(struct super_block *sb)
2126 {
2127 	struct workqueue_struct *old;
2128 	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
2129 						      WQ_MEM_RECLAIM, 0,
2130 						      sb->s_id);
2131 	if (!wq)
2132 		return -ENOMEM;
2133 	/*
2134 	 * This has to be atomic as multiple DIOs can race to create the workqueue
2135 	 */
2136 	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
2137 	/* Someone created workqueue before us? Free ours... */
2138 	if (old)
2139 		destroy_workqueue(wq);
2140 	return 0;
2141 }
2142
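
/*
 * A hedged sketch of the caller pattern (modelled on fs/direct-io.c):
 * ensure the lazily-created workqueue exists before queueing the first
 * deferred completion.  The work item is assumed to have been set up with
 * INIT_WORK() by the caller; example_defer_completion() is hypothetical.
 */
static int example_defer_completion(struct inode *inode,
				    struct work_struct *work)
{
	struct super_block *sb = inode->i_sb;

	if (!sb->s_dio_done_wq) {
		int err = sb_init_dio_done_wq(sb);

		if (err)
			return err;
	}
	/* s_dio_done_wq is never destroyed while the superblock is alive. */
	queue_work(sb->s_dio_done_wq, work);
	return 0;
}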