/* fs/fs_struct.c (openbmc/linux, revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f) */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>

/*
 * Replace fs->root with *path; put the old root.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	spin_lock(&fs->lock);
	old_root = fs->root;
	fs->root = *path;
	path_get(path);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace fs->pwd with *path; put the old pwd.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

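/*
 * Walk every task in the system and repoint any fs->root or fs->pwd that
 * still refers to *old_root at *new_root instead (e.g. after pivot_root()).
 * The reference drops on old_root are deferred until all locks have been
 * released, because path_put() can block.
 */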
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			spin_lock(&fs->lock);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get(new_root);
				fs->root = *new_root;
				count++;
			}
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get(new_root);
				fs->pwd = *new_root;
				count++;
			}
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	while (count--)
		path_put(old_root);
}

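/* Drop the references held on root and pwd, then free the fs_struct itself. */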
void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

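/*
 * Detach the fs_struct from an exiting task.  If this was the last user,
 * free it.
 */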
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		tsk->fs = NULL;
		kill = !--fs->users;
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

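/*
 * Allocate a new fs_struct initialized as a copy of @old: same umask, with
 * its own references on root and pwd.  Returns NULL if allocation fails.
 */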
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* No need to lock the new fs_struct: nobody else can see it yet. */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		fs->umask = old->umask;
		get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
	}
	return fs;
}

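/*
 * Give the calling task a private copy of its fs_struct, releasing the
 * shared one (and freeing it if current was its last user).
 * Returns 0 on success or -ENOMEM if the copy could not be allocated.
 */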
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

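/* Return the umask of the calling task. */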
int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.umask		= 0022,
};

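/*
 * Switch the current task over to init_fs: take a reference on init_fs,
 * install it as current->fs, and drop (possibly freeing) the old fs_struct.
 */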
void daemonize_fs_struct(void)
{
	struct fs_struct *fs = current->fs;

	if (fs) {
		int kill;

		task_lock(current);

		spin_lock(&init_fs.lock);
		init_fs.users++;
		spin_unlock(&init_fs.lock);

		spin_lock(&fs->lock);
		current->fs = &init_fs;
		kill = !--fs->users;
		spin_unlock(&fs->lock);

		task_unlock(current);
		if (kill)
			free_fs_struct(fs);
	}
}