// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <uapi/linux/mount.h>
#include "internal.h"
#include "pnode.h"
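
/*
 * Helpers for shared-subtree propagation.  Mounts in one peer group are
 * linked into a circular list through ->mnt_share; a slave mount points
 * to its master via ->mnt_master and sits on that master's
 * ->mnt_slave_list through its own ->mnt_slave entry.
 */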

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

static inline struct mount *last_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
}

static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}

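/*
 * Turn 'mnt' into a slave: detach it from its peer group, choosing as the
 * new master a peer with the same root dentry if possible (otherwise any
 * peer, or the old master when there are no peers), hand all of mnt's own
 * slaves over to that master and put mnt itself on the master's slave
 * list.  A mount with neither peers nor a master just releases its slaves
 * and ends up private.
 */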
static int do_make_slave(struct mount *mnt)
{
	struct mount *master, *slave_mnt;

	if (list_empty(&mnt->mnt_share)) {
		if (IS_MNT_SHARED(mnt)) {
			mnt_release_group_id(mnt);
			CLEAR_MNT_SHARED(mnt);
		}
		master = mnt->mnt_master;
		if (!master) {
			struct list_head *p = &mnt->mnt_slave_list;
			while (!list_empty(p)) {
				slave_mnt = list_first_entry(p,
						struct mount, mnt_slave);
				list_del_init(&slave_mnt->mnt_slave);
				slave_mnt->mnt_master = NULL;
			}
			return 0;
		}
	} else {
		struct mount *m;
		/*
		 * slave 'mnt' to a peer mount that has the
		 * same root dentry. If none is available then
		 * slave it to anything that is available.
		 */
		for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) {
			if (m->mnt.mnt_root == mnt->mnt.mnt_root) {
				master = m;
				break;
			}
		}
		list_del_init(&mnt->mnt_share);
		mnt->mnt_group_id = 0;
		CLEAR_MNT_SHARED(mnt);
	}
	list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
		slave_mnt->mnt_master = master;
	list_move(&mnt->mnt_slave, &master->mnt_slave_list);
	list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
	INIT_LIST_HEAD(&mnt->mnt_slave_list);
	mnt->mnt_master = master;
	return 0;
}

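/*
 * Note: this is the worker behind mount(2) propagation-type changes
 * (mount --make-shared/--make-slave/--make-private/--make-unbindable),
 * called from do_change_type() in fs/namespace.c.
 */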
/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that to be able to find out whether a mount found while
 * iterating with propagation_next() is a peer of one we'd found earlier.
 */
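/*
 * The resulting walk is depth-first: a mount's slaves (and their slaves,
 * recursively) are visited before its next peer, and @origin itself is
 * never returned; NULL ends the walk.
 */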
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

static struct mount *skip_propagation_subtree(struct mount *m,
						struct mount *origin)
{
	/*
	 * Advance m such that propagation_next will not return
	 * the slaves of m.
	 */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		m = last_slave(m);

	return m;
}

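/*
 * Find the next peer group, in propagation order, that receives
 * propagation from @origin's group.  Returns one representative of that
 * group (the caller walks the rest of the group via next_peer() itself),
 * or NULL once every group has been visited.
 */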
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next != &master->mnt_slave_list)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

/* all accesses are serialized by namespace_sem */
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct hlist_head *list;

static inline bool peers(const struct mount *m1, const struct mount *m2)
{
	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}

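/*
 * Propagate the mount tree rooted at last_source to the mount @m sitting
 * at @dest_mp: decide whether @m should receive a peer (CL_MAKE_SHARED)
 * or a slave (CL_SLAVE) copy, clone the tree, park the clone at @dest_mp
 * on @m and queue it on the global 'list' for the caller to commit.
 */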
static int propagate_one(struct mount *m, struct mountpoint *dest_mp)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (peers(m, last_dest)) {
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
		bool done;
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p))
				break;
		}
		do {
			struct mount *parent = last_source->mnt_parent;
			if (peers(last_source, first_source))
				break;
			done = parent->mnt_master == p;
			if (done && peers(n, parent))
				break;
			last_source = last_source->mnt_master;
		} while (!done);

		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}

	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	read_seqlock_excl(&mount_lock);
	mnt_set_mountpoint(m, dest_mp, child);
	if (m->mnt_master != dest_master)
		SET_MNT_MARK(m->mnt_master);
	read_sequnlock_excl(&mount_lock);
	last_dest = m;
	last_source = child;
	hlist_add_head(&child->mnt_hash, list);
	return count_mounts(m->mnt_ns, child);
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at the mountpoint
 * 'dest_mp', and propagate that mount to all the peer and slave mounts
 * of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
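/*
 * Note: the copies created here are only hashed onto @tree_list; the
 * caller (attach_recursive_mnt() in fs/namespace.c) commits them into
 * their namespaces afterwards.
 */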
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		    struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n;
	int ret = 0;

	/*
	 * we don't want to bother passing tons of arguments to
	 * propagate_one(); everything is serialized by namespace_sem,
	 * so globals will do just fine.
	 */
	last_dest = dest_mnt;
	first_source = source_mnt;
	last_source = source_mnt;
	list = tree_list;
	dest_master = dest_mnt->mnt_master;

	/* all peers of dest_mnt, except dest_mnt itself */
	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
		ret = propagate_one(n, dest_mp);
		if (ret)
			goto out;
	}

	/* all slave groups */
	for (m = next_group(dest_mnt, dest_mnt); m;
			m = next_group(m, dest_mnt)) {
		/* everything in that slave group */
		n = m;
		do {
			ret = propagate_one(n, dest_mp);
			if (ret)
				goto out;
			n = next_peer(n);
		} while (n != m);
	}
out:
	read_seqlock_excl(&mount_lock);
	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master != dest_mnt->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	read_sequnlock_excl(&mount_lock);
	return ret;
}

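/*
 * A "topper" is the single child mounted directly on top of mnt's own
 * root, i.e. a mount that covers mnt completely.  Propagated umount
 * tolerates such a mount: it does not make mnt busy and is reparented
 * rather than torn down.
 */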
static struct mount *find_topper(struct mount *mnt)
{
	/* If there is exactly one mount covering mnt completely return it. */
	struct mount *child;

	if (!list_is_singular(&mnt->mnt_mounts))
		return NULL;

	child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
	if (child->mnt_mountpoint != mnt->mnt.mnt_root)
		return NULL;

	return child;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/**
 * propagation_would_overmount - check whether propagation from @from
 *                               would overmount @to
 * @from: shared mount
 * @to: mount to check
 * @mp: future mountpoint of @to on @from
 *
 * If @from propagates mounts to @to, @from and @to must either be peers
 * or one of the masters in the hierarchy of masters of @to must be a
 * peer of @from.
 *
 * If the root of the @to mount is equal to the future mountpoint @mp of
 * the @to mount on @from then @to will be overmounted by whatever is
 * propagated to it.
 *
 * Context: This function expects namespace_lock() to be held and that
 *          @mp is stable.
 * Return: If @from overmounts @to, true is returned, false if not.
 */
bool propagation_would_overmount(const struct mount *from,
				 const struct mount *to,
				 const struct mountpoint *mp)
{
	if (!IS_MNT_SHARED(from))
		return false;

	if (IS_MNT_NEW(to))
		return false;

	if (to->mnt.mnt_root != mp->m_dentry)
		return false;

	for (const struct mount *m = to; m; m = m->mnt_master) {
		if (peers(from, m))
			return true;
	}

	return false;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
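/*
 * Note: this check is what makes a plain umount(2) of a shared mount fail
 * with -EBUSY while one of the propagated copies is still in use; the
 * caller is do_umount() in fs/namespace.c.
 */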
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child, *topper;
	struct mount *parent = mnt->mnt_parent;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	for (m = propagation_next(parent, parent); m;
	     m = propagation_next(m, parent)) {
		int count = 1;
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (!child)
			continue;

		/* Is there exactly one mount on the child that covers
		 * it completely whose reference should be ignored?
		 */
		topper = find_topper(child);
		if (topper)
			count += 1;
		else if (!list_empty(&child->mnt_mounts))
			continue;

		if (do_refcount_check(child, count))
			return 1;
	}
	return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
	     m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

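/*
 * Take one mount out of the tree for unmounting: clear its mark, flag it
 * MNT_UMOUNT, unhook it from its parent's children and queue it on
 * @to_umount.
 */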
static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
	CLEAR_MNT_MARK(mnt);
	mnt->mnt.mnt_flags |= MNT_UMOUNT;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_umounting);
	list_move_tail(&mnt->mnt_list, to_umount);
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
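/*
 * Try to make progress on one propagated copy @mnt: if every child other
 * than a possible topper has already been speculatively unmounted, mark
 * @mnt and either queue it on @to_umount (unlocked) or park it on
 * @to_restore (MNT_LOCKED).  Mounts that still have other children
 * attached are parked on @to_restore unmarked.  Returns true iff @mnt was
 * newly marked, so the caller can retry its parent.
 */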
static bool __propagate_umount(struct mount *mnt,
			       struct list_head *to_umount,
			       struct list_head *to_restore)
{
	bool progress = false;
	struct mount *child;

	/*
	 * The state of the parent won't change if this mount is
	 * already unmounted or marked as without children.
	 */
	if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
		goto out;

	/* Verify topper is the only grandchild that has not been
	 * speculatively unmounted.
	 */
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (child->mnt_mountpoint == mnt->mnt.mnt_root)
			continue;
		if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
			continue;
		/* Found a mounted child */
		goto children;
	}

	/* Mark mounts that can be unmounted if not locked */
	SET_MNT_MARK(mnt);
	progress = true;

	/* If a mount is without children and not locked umount it. */
	if (!IS_MNT_LOCKED(mnt)) {
		umount_one(mnt, to_umount);
	} else {
children:
		list_move_tail(&mnt->mnt_umounting, to_restore);
	}
out:
	return progress;
}

static void umount_list(struct list_head *to_umount,
			struct list_head *to_restore)
{
	struct mount *mnt, *child, *tmp;
	list_for_each_entry(mnt, to_umount, mnt_list) {
		list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
			/* topper? */
			if (child->mnt_mountpoint == mnt->mnt.mnt_root)
				list_move_tail(&child->mnt_umounting, to_restore);
			else
				umount_one(child, to_umount);
		}
	}
}

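/*
 * Put the survivors back into shape: clear their marks, drop them from
 * the visitation list and, where the parent has been unmounted, hang
 * them off the nearest surviving ancestor at the point where the
 * unmounted subtree used to be attached.
 */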
static void restore_mounts(struct list_head *to_restore)
{
	/* Restore mounts to a clean working state */
	while (!list_empty(to_restore)) {
		struct mount *mnt, *parent;
		struct mountpoint *mp;

		mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
		CLEAR_MNT_MARK(mnt);
		list_del_init(&mnt->mnt_umounting);

		/* Should this mount be reparented? */
		mp = mnt->mnt_mp;
		parent = mnt->mnt_parent;
		while (parent->mnt.mnt_flags & MNT_UMOUNT) {
			mp = parent->mnt_mp;
			parent = parent->mnt_parent;
		}
		if (parent != mnt->mnt_parent)
			mnt_change_mountpoint(parent, mp, mnt);
	}
}

static void cleanup_umount_visitations(struct list_head *visited)
{
	while (!list_empty(visited)) {
		struct mount *mnt =
			list_first_entry(visited, struct mount, mnt_umounting);
		list_del_init(&mnt->mnt_umounting);
	}
}

/*
 * collect all mounts that receive propagation from the mounts in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
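/*
 * Note: called from umount_tree() in fs/namespace.c with the victim
 * subtree already on @list; everything appended here is torn down by the
 * caller in the same pass.
 */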
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;
	LIST_HEAD(to_restore);
	LIST_HEAD(to_umount);
	LIST_HEAD(visited);

	/* Find candidates for unmounting */
	list_for_each_entry_reverse(mnt, list, mnt_list) {
		struct mount *parent = mnt->mnt_parent;
		struct mount *m;

		/*
		 * If this mount has already been visited it is known that its
		 * entire peer group and all of their slaves in the propagation
		 * tree for the mountpoint have already been visited and there
		 * is no need to visit them again.
		 */
		if (!list_empty(&mnt->mnt_umounting))
			continue;

		list_add_tail(&mnt->mnt_umounting, &visited);
		for (m = propagation_next(parent, parent); m;
		     m = propagation_next(m, parent)) {
			struct mount *child = __lookup_mnt(&m->mnt,
							   mnt->mnt_mountpoint);
			if (!child)
				continue;

			if (!list_empty(&child->mnt_umounting)) {
				/*
				 * If the child has already been visited it is
				 * known that its entire peer group and all of
				 * their slaves in the propagation tree for the
				 * mountpoint have already been visited and
				 * there is no need to visit this subtree again.
				 */
				m = skip_propagation_subtree(m, parent);
				continue;
			} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
				/*
				 * We have come across a partially unmounted
				 * mount in the list that has not been visited
				 * yet.  Remember it has been visited and
				 * continue about our merry way.
				 */
				list_add_tail(&child->mnt_umounting, &visited);
				continue;
			}

			/* Check the child and parents while progress is made */
			while (__propagate_umount(child,
						  &to_umount, &to_restore)) {
				/* Is the parent a umount candidate? */
				child = child->mnt_parent;
				if (list_empty(&child->mnt_umounting))
					break;
			}
		}
	}

	umount_list(&to_umount, &to_restore);
	restore_mounts(&to_restore);
	cleanup_umount_visitations(&visited);
	list_splice_tail(&to_umount, list);

	return 0;
}