1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/file.h>
14 #include <linux/seq_file.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/fs_context.h>
19 #include <linux/fs_parser.h>
20 #include <linux/statfs.h>
21 #include <linux/random.h>
22 #include <linux/sched.h>
23 #include <linux/exportfs.h>
24 #include <linux/posix_acl.h>
25 #include <linux/pid_namespace.h>
26 #include <uapi/linux/magic.h>
27
28 MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
29 MODULE_DESCRIPTION("Filesystem in Userspace");
30 MODULE_LICENSE("GPL");
31
32 static struct kmem_cache *fuse_inode_cachep;
33 struct list_head fuse_conn_list;
34 DEFINE_MUTEX(fuse_mutex);
35
36 static int set_global_limit(const char *val, const struct kernel_param *kp);
37
38 unsigned max_user_bgreq;
39 module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
40 &max_user_bgreq, 0644);
41 __MODULE_PARM_TYPE(max_user_bgreq, "uint");
42 MODULE_PARM_DESC(max_user_bgreq,
43 "Global limit for the maximum number of backgrounded requests an "
44 "unprivileged user can set");
45
46 unsigned max_user_congthresh;
47 module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
48 &max_user_congthresh, 0644);
49 __MODULE_PARM_TYPE(max_user_congthresh, "uint");
50 MODULE_PARM_DESC(max_user_congthresh,
51 "Global limit for the maximum congestion threshold an "
52 "unprivileged user can set");
53
54 #define FUSE_DEFAULT_BLKSIZE 512
55
56 /** Maximum number of outstanding background requests */
57 #define FUSE_DEFAULT_MAX_BACKGROUND 12
58
59 /** Congestion starts at 75% of maximum */
60 #define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
61
62 #ifdef CONFIG_BLOCK
63 static struct file_system_type fuseblk_fs_type;
64 #endif
65
66 struct fuse_forget_link *fuse_alloc_forget(void)
67 {
68 return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
69 }
70
71 static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
72 {
73 struct fuse_submount_lookup *sl;
74
75 sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
76 if (!sl)
77 return NULL;
78 sl->forget = fuse_alloc_forget();
79 if (!sl->forget)
80 goto out_free;
81
82 return sl;
83
84 out_free:
85 kfree(sl);
86 return NULL;
87 }
88
89 static struct inode *fuse_alloc_inode(struct super_block *sb)
90 {
91 struct fuse_inode *fi;
92
93 fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL);
94 if (!fi)
95 return NULL;
96
97 fi->i_time = 0;
98 fi->inval_mask = ~0;
99 fi->nodeid = 0;
100 fi->nlookup = 0;
101 fi->attr_version = 0;
102 fi->orig_ino = 0;
103 fi->state = 0;
104 fi->submount_lookup = NULL;
105 mutex_init(&fi->mutex);
106 spin_lock_init(&fi->lock);
107 fi->forget = fuse_alloc_forget();
108 if (!fi->forget)
109 goto out_free;
110
111 if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
112 goto out_free_forget;
113
114 return &fi->inode;
115
116 out_free_forget:
117 kfree(fi->forget);
118 out_free:
119 kmem_cache_free(fuse_inode_cachep, fi);
120 return NULL;
121 }
122
123 static void fuse_free_inode(struct inode *inode)
124 {
125 struct fuse_inode *fi = get_fuse_inode(inode);
126
127 mutex_destroy(&fi->mutex);
128 kfree(fi->forget);
129 #ifdef CONFIG_FUSE_DAX
130 kfree(fi->dax);
131 #endif
132 kmem_cache_free(fuse_inode_cachep, fi);
133 }
134
135 static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
136 struct fuse_submount_lookup *sl)
137 {
138 if (!refcount_dec_and_test(&sl->count))
139 return;
140
141 fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
142 sl->forget = NULL;
143 kfree(sl);
144 }
145
146 static void fuse_evict_inode(struct inode *inode)
147 {
148 struct fuse_inode *fi = get_fuse_inode(inode);
149
150 /* Will write inode on close/munmap and in all other dirtiers */
151 WARN_ON(inode->i_state & I_DIRTY_INODE);
152
153 truncate_inode_pages_final(&inode->i_data);
154 clear_inode(inode);
155 if (inode->i_sb->s_flags & SB_ACTIVE) {
156 struct fuse_conn *fc = get_fuse_conn(inode);
157
158 if (FUSE_IS_DAX(inode))
159 fuse_dax_inode_cleanup(inode);
160 if (fi->nlookup) {
161 fuse_queue_forget(fc, fi->forget, fi->nodeid,
162 fi->nlookup);
163 fi->forget = NULL;
164 }
165
166 if (fi->submount_lookup) {
167 fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
168 fi->submount_lookup = NULL;
169 }
170 }
171 if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
172 WARN_ON(!list_empty(&fi->write_files));
173 WARN_ON(!list_empty(&fi->queued_writes));
174 }
175 }
176
177 static int fuse_reconfigure(struct fs_context *fsc)
178 {
179 struct super_block *sb = fsc->root->d_sb;
180
181 sync_filesystem(sb);
182 if (fsc->sb_flags & SB_MANDLOCK)
183 return -EINVAL;
184
185 return 0;
186 }
187
188 /*
189 * ino_t is 32 bits on a 32-bit arch. We have to squash the 64-bit value down
190 * so that it will fit.
191 */
192 static ino_t fuse_squash_ino(u64 ino64)
193 {
194 ino_t ino = (ino_t) ino64;
195 if (sizeof(ino_t) < sizeof(u64))
196 ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
197 return ino;
198 }
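/*
 * Example (illustrative, not part of the original source): on a 32-bit arch
 * where sizeof(ino_t) == 4, a 64-bit node id of 0x0000000100000002 is first
 * truncated to 0x00000002 and then XORed with the high word (ino64 >> 32 ==
 * 0x00000001), yielding 0x00000003. Folding the high bits in keeps distinct
 * 64-bit values from colliding as often after truncation.
 */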
199
200 void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
201 struct fuse_statx *sx,
202 u64 attr_valid, u32 cache_mask)
203 {
204 struct fuse_conn *fc = get_fuse_conn(inode);
205 struct fuse_inode *fi = get_fuse_inode(inode);
206
207 lockdep_assert_held(&fi->lock);
208
209 fi->attr_version = atomic64_inc_return(&fc->attr_version);
210 fi->i_time = attr_valid;
211 /* Clear basic stats from invalid mask */
212 set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);
213
214 inode->i_ino = fuse_squash_ino(attr->ino);
215 inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
216 set_nlink(inode, attr->nlink);
217 inode->i_uid = make_kuid(fc->user_ns, attr->uid);
218 inode->i_gid = make_kgid(fc->user_ns, attr->gid);
219 inode->i_blocks = attr->blocks;
220
221 /* Sanitize nsecs */
222 attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
223 attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
224 attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);
225
226 inode->i_atime.tv_sec = attr->atime;
227 inode->i_atime.tv_nsec = attr->atimensec;
228 /* mtime from server may be stale due to local buffered write */
229 if (!(cache_mask & STATX_MTIME)) {
230 inode->i_mtime.tv_sec = attr->mtime;
231 inode->i_mtime.tv_nsec = attr->mtimensec;
232 }
233 if (!(cache_mask & STATX_CTIME)) {
234 inode_set_ctime(inode, attr->ctime, attr->ctimensec);
235 }
236 if (sx) {
237 /* Sanitize nsecs */
238 sx->btime.tv_nsec =
239 min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
240
241 /*
242 * Btime has been queried and the cache is valid (whether or not
243 * btime is available), so clear STATX_BTIME from inval_mask.
244 *
245 * Availability of the btime attribute is indicated in
246 * FUSE_I_BTIME
247 */
248 set_mask_bits(&fi->inval_mask, STATX_BTIME, 0);
249 if (sx->mask & STATX_BTIME) {
250 set_bit(FUSE_I_BTIME, &fi->state);
251 fi->i_btime.tv_sec = sx->btime.tv_sec;
252 fi->i_btime.tv_nsec = sx->btime.tv_nsec;
253 }
254 }
255
256 if (attr->blksize != 0)
257 inode->i_blkbits = ilog2(attr->blksize);
258 else
259 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
260
261 /*
262 * Don't set the sticky bit in i_mode, unless we want the VFS
263 * to check permissions. This prevents failures due to the
264 * check in may_delete().
265 */
266 fi->orig_i_mode = inode->i_mode;
267 if (!fc->default_permissions)
268 inode->i_mode &= ~S_ISVTX;
269
270 fi->orig_ino = attr->ino;
271
272 /*
273 * We are refreshing inode data and it is possible that another
274 * client set suid/sgid or security.capability xattr. So clear
275 * S_NOSEC. Ideally, we could have cleared it only if suid/sgid
276 * was set or if security.capability xattr was set. But we don't
277 * know if security.capability has been set or not. So clear it
278 * anyway. It's less efficient but should be safe.
279 */
280 inode->i_flags &= ~S_NOSEC;
281 }
282
283 u32 fuse_get_cache_mask(struct inode *inode)
284 {
285 struct fuse_conn *fc = get_fuse_conn(inode);
286
287 if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
288 return 0;
289
290 return STATX_MTIME | STATX_CTIME | STATX_SIZE;
291 }
292
293 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
294 struct fuse_statx *sx,
295 u64 attr_valid, u64 attr_version)
296 {
297 struct fuse_conn *fc = get_fuse_conn(inode);
298 struct fuse_inode *fi = get_fuse_inode(inode);
299 u32 cache_mask;
300 loff_t oldsize;
301 struct timespec64 old_mtime;
302
303 spin_lock(&fi->lock);
304 /*
305 * If writeback_cache is enabled, writes update mtime and ctime and
306 * may update i_size. In these cases trust the cached value in the
307 * inode.
308 */
309 cache_mask = fuse_get_cache_mask(inode);
310 if (cache_mask & STATX_SIZE)
311 attr->size = i_size_read(inode);
312
313 if (cache_mask & STATX_MTIME) {
314 attr->mtime = inode->i_mtime.tv_sec;
315 attr->mtimensec = inode->i_mtime.tv_nsec;
316 }
317 if (cache_mask & STATX_CTIME) {
318 attr->ctime = inode_get_ctime(inode).tv_sec;
319 attr->ctimensec = inode_get_ctime(inode).tv_nsec;
320 }
321
322 if ((attr_version != 0 && fi->attr_version > attr_version) ||
323 test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
324 spin_unlock(&fi->lock);
325 return;
326 }
327
328 old_mtime = inode->i_mtime;
329 fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask);
330
331 oldsize = inode->i_size;
332 /*
333 * If writeback_cache is enabled, cached writes beyond EOF extend the
334 * local i_size without keeping the userspace server in sync. So
335 * attr->size coming from the server can be stale and cannot be trusted.
336 */
337 if (!(cache_mask & STATX_SIZE))
338 i_size_write(inode, attr->size);
339 spin_unlock(&fi->lock);
340
341 if (!cache_mask && S_ISREG(inode->i_mode)) {
342 bool inval = false;
343
344 if (oldsize != attr->size) {
345 truncate_pagecache(inode, attr->size);
346 if (!fc->explicit_inval_data)
347 inval = true;
348 } else if (fc->auto_inval_data) {
349 struct timespec64 new_mtime = {
350 .tv_sec = attr->mtime,
351 .tv_nsec = attr->mtimensec,
352 };
353
354 /*
355 * Auto inval mode also checks and invalidates if mtime
356 * has changed.
357 */
358 if (!timespec64_equal(&old_mtime, &new_mtime))
359 inval = true;
360 }
361
362 if (inval)
363 invalidate_inode_pages2(inode->i_mapping);
364 }
365
366 if (IS_ENABLED(CONFIG_FUSE_DAX))
367 fuse_dax_dontcache(inode, attr->flags);
368 }
369
370 static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
371 u64 nodeid)
372 {
373 sl->nodeid = nodeid;
374 refcount_set(&sl->count, 1);
375 }
376
377 static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
378 struct fuse_conn *fc)
379 {
380 inode->i_mode = attr->mode & S_IFMT;
381 inode->i_size = attr->size;
382 inode->i_mtime.tv_sec = attr->mtime;
383 inode->i_mtime.tv_nsec = attr->mtimensec;
384 inode_set_ctime(inode, attr->ctime, attr->ctimensec);
385 if (S_ISREG(inode->i_mode)) {
386 fuse_init_common(inode);
387 fuse_init_file_inode(inode, attr->flags);
388 } else if (S_ISDIR(inode->i_mode))
389 fuse_init_dir(inode);
390 else if (S_ISLNK(inode->i_mode))
391 fuse_init_symlink(inode);
392 else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
393 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
394 fuse_init_common(inode);
395 init_special_inode(inode, inode->i_mode,
396 new_decode_dev(attr->rdev));
397 } else
398 BUG();
399 /*
400 * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
401 * so they see the exact same behavior as before.
402 */
403 if (!fc->posix_acl)
404 inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
405 }
406
407 static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
408 {
409 u64 nodeid = *(u64 *) _nodeidp;
410 if (get_node_id(inode) == nodeid)
411 return 1;
412 else
413 return 0;
414 }
415
416 static int fuse_inode_set(struct inode *inode, void *_nodeidp)
417 {
418 u64 nodeid = *(u64 *) _nodeidp;
419 get_fuse_inode(inode)->nodeid = nodeid;
420 return 0;
421 }
422
423 struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
424 int generation, struct fuse_attr *attr,
425 u64 attr_valid, u64 attr_version)
426 {
427 struct inode *inode;
428 struct fuse_inode *fi;
429 struct fuse_conn *fc = get_fuse_conn_super(sb);
430
431 /*
432 * Auto mount points get their node id from the submount root, which is
433 * not a unique identifier within this filesystem.
434 *
435 * To avoid conflicts, do not place submount points into the inode hash
436 * table.
437 */
438 if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
439 S_ISDIR(attr->mode)) {
440 struct fuse_inode *fi;
441
442 inode = new_inode(sb);
443 if (!inode)
444 return NULL;
445
446 fuse_init_inode(inode, attr, fc);
447 fi = get_fuse_inode(inode);
448 fi->nodeid = nodeid;
449 fi->submount_lookup = fuse_alloc_submount_lookup();
450 if (!fi->submount_lookup) {
451 iput(inode);
452 return NULL;
453 }
454 /* Sets the initial lookup refcount (count = 1) on fi->submount_lookup */
455 fuse_init_submount_lookup(fi->submount_lookup, nodeid);
456 inode->i_flags |= S_AUTOMOUNT;
457 goto done;
458 }
459
460 retry:
461 inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
462 if (!inode)
463 return NULL;
464
465 if ((inode->i_state & I_NEW)) {
466 inode->i_flags |= S_NOATIME;
467 if (!fc->writeback_cache || !S_ISREG(attr->mode))
468 inode->i_flags |= S_NOCMTIME;
469 inode->i_generation = generation;
470 fuse_init_inode(inode, attr, fc);
471 unlock_new_inode(inode);
472 } else if (fuse_stale_inode(inode, generation, attr)) {
473 /* nodeid was reused, any I/O on the old inode should fail */
474 fuse_make_bad(inode);
475 if (inode != d_inode(sb->s_root)) {
476 remove_inode_hash(inode);
477 iput(inode);
478 goto retry;
479 }
480 }
481 fi = get_fuse_inode(inode);
482 spin_lock(&fi->lock);
483 fi->nlookup++;
484 spin_unlock(&fi->lock);
485 done:
486 fuse_change_attributes(inode, attr, NULL, attr_valid, attr_version);
487
488 return inode;
489 }
490
491 struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
492 struct fuse_mount **fm)
493 {
494 struct fuse_mount *fm_iter;
495 struct inode *inode;
496
497 WARN_ON(!rwsem_is_locked(&fc->killsb));
498 list_for_each_entry(fm_iter, &fc->mounts, fc_entry) {
499 if (!fm_iter->sb)
500 continue;
501
502 inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid);
503 if (inode) {
504 if (fm)
505 *fm = fm_iter;
506 return inode;
507 }
508 }
509
510 return NULL;
511 }
512
513 int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
514 loff_t offset, loff_t len)
515 {
516 struct fuse_inode *fi;
517 struct inode *inode;
518 pgoff_t pg_start;
519 pgoff_t pg_end;
520
521 inode = fuse_ilookup(fc, nodeid, NULL);
522 if (!inode)
523 return -ENOENT;
524
525 fi = get_fuse_inode(inode);
526 spin_lock(&fi->lock);
527 fi->attr_version = atomic64_inc_return(&fc->attr_version);
528 spin_unlock(&fi->lock);
529
530 fuse_invalidate_attr(inode);
531 forget_all_cached_acls(inode);
532 if (offset >= 0) {
533 pg_start = offset >> PAGE_SHIFT;
534 if (len <= 0)
535 pg_end = -1;
536 else
537 pg_end = (offset + len - 1) >> PAGE_SHIFT;
538 invalidate_inode_pages2_range(inode->i_mapping,
539 pg_start, pg_end);
540 }
541 iput(inode);
542 return 0;
543 }
544
545 bool fuse_lock_inode(struct inode *inode)
546 {
547 bool locked = false;
548
549 if (!get_fuse_conn(inode)->parallel_dirops) {
550 mutex_lock(&get_fuse_inode(inode)->mutex);
551 locked = true;
552 }
553
554 return locked;
555 }
556
557 void fuse_unlock_inode(struct inode *inode, bool locked)
558 {
559 if (locked)
560 mutex_unlock(&get_fuse_inode(inode)->mutex);
561 }
562
563 static void fuse_umount_begin(struct super_block *sb)
564 {
565 struct fuse_conn *fc = get_fuse_conn_super(sb);
566
567 if (fc->no_force_umount)
568 return;
569
570 fuse_abort_conn(fc);
571
572 // Only retire block-device-based superblocks.
573 if (sb->s_bdev != NULL)
574 retire_super(sb);
575 }
576
577 static void fuse_send_destroy(struct fuse_mount *fm)
578 {
579 if (fm->fc->conn_init) {
580 FUSE_ARGS(args);
581
582 args.opcode = FUSE_DESTROY;
583 args.force = true;
584 args.nocreds = true;
585 fuse_simple_request(fm, &args);
586 }
587 }
588
589 static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
590 {
591 stbuf->f_type = FUSE_SUPER_MAGIC;
592 stbuf->f_bsize = attr->bsize;
593 stbuf->f_frsize = attr->frsize;
594 stbuf->f_blocks = attr->blocks;
595 stbuf->f_bfree = attr->bfree;
596 stbuf->f_bavail = attr->bavail;
597 stbuf->f_files = attr->files;
598 stbuf->f_ffree = attr->ffree;
599 stbuf->f_namelen = attr->namelen;
600 /* fsid is left zero */
601 }
602
603 static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
604 {
605 struct super_block *sb = dentry->d_sb;
606 struct fuse_mount *fm = get_fuse_mount_super(sb);
607 FUSE_ARGS(args);
608 struct fuse_statfs_out outarg;
609 int err;
610
611 if (!fuse_allow_current_process(fm->fc)) {
612 buf->f_type = FUSE_SUPER_MAGIC;
613 return 0;
614 }
615
616 memset(&outarg, 0, sizeof(outarg));
617 args.in_numargs = 0;
618 args.opcode = FUSE_STATFS;
619 args.nodeid = get_node_id(d_inode(dentry));
620 args.out_numargs = 1;
621 args.out_args[0].size = sizeof(outarg);
622 args.out_args[0].value = &outarg;
623 err = fuse_simple_request(fm, &args);
624 if (!err)
625 convert_fuse_statfs(buf, &outarg.st);
626 return err;
627 }
628
629 static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
630 {
631 struct fuse_sync_bucket *bucket;
632
633 bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
634 if (bucket) {
635 init_waitqueue_head(&bucket->waitq);
636 /* Initial active count */
637 atomic_set(&bucket->count, 1);
638 }
639 return bucket;
640 }
641
642 static void fuse_sync_fs_writes(struct fuse_conn *fc)
643 {
644 struct fuse_sync_bucket *bucket, *new_bucket;
645 int count;
646
647 new_bucket = fuse_sync_bucket_alloc();
648 spin_lock(&fc->lock);
649 bucket = rcu_dereference_protected(fc->curr_bucket, 1);
650 count = atomic_read(&bucket->count);
651 WARN_ON(count < 1);
652 /* No outstanding writes? */
653 if (count == 1) {
654 spin_unlock(&fc->lock);
655 kfree(new_bucket);
656 return;
657 }
658
659 /*
660 * Completion of new bucket depends on completion of this bucket, so add
661 * one more count.
662 */
663 atomic_inc(&new_bucket->count);
664 rcu_assign_pointer(fc->curr_bucket, new_bucket);
665 spin_unlock(&fc->lock);
666 /*
667 * Drop initial active count. At this point if all writes in this and
668 * ancestor buckets complete, the count will go to zero and this task
669 * will be woken up.
670 */
671 atomic_dec(&bucket->count);
672
673 wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);
674
675 /* Drop temp count on descendant bucket */
676 fuse_sync_bucket_dec(new_bucket);
677 kfree_rcu(bucket, rcu);
678 }
679
680 static int fuse_sync_fs(struct super_block *sb, int wait)
681 {
682 struct fuse_mount *fm = get_fuse_mount_super(sb);
683 struct fuse_conn *fc = fm->fc;
684 struct fuse_syncfs_in inarg;
685 FUSE_ARGS(args);
686 int err;
687
688 /*
689 * Userspace cannot handle the wait == 0 case. Avoid a
690 * gratuitous roundtrip.
691 */
692 if (!wait)
693 return 0;
694
695 /* The filesystem is being unmounted. Nothing to do. */
696 if (!sb->s_root)
697 return 0;
698
699 if (!fc->sync_fs)
700 return 0;
701
702 fuse_sync_fs_writes(fc);
703
704 memset(&inarg, 0, sizeof(inarg));
705 args.in_numargs = 1;
706 args.in_args[0].size = sizeof(inarg);
707 args.in_args[0].value = &inarg;
708 args.opcode = FUSE_SYNCFS;
709 args.nodeid = get_node_id(sb->s_root->d_inode);
710 args.out_numargs = 0;
711
712 err = fuse_simple_request(fm, &args);
713 if (err == -ENOSYS) {
714 fc->sync_fs = 0;
715 err = 0;
716 }
717
718 return err;
719 }
720
721 enum {
722 OPT_SOURCE,
723 OPT_SUBTYPE,
724 OPT_FD,
725 OPT_ROOTMODE,
726 OPT_USER_ID,
727 OPT_GROUP_ID,
728 OPT_DEFAULT_PERMISSIONS,
729 OPT_ALLOW_OTHER,
730 OPT_MAX_READ,
731 OPT_BLKSIZE,
732 OPT_ERR
733 };
734
735 static const struct fs_parameter_spec fuse_fs_parameters[] = {
736 fsparam_string ("source", OPT_SOURCE),
737 fsparam_u32 ("fd", OPT_FD),
738 fsparam_u32oct ("rootmode", OPT_ROOTMODE),
739 fsparam_u32 ("user_id", OPT_USER_ID),
740 fsparam_u32 ("group_id", OPT_GROUP_ID),
741 fsparam_flag ("default_permissions", OPT_DEFAULT_PERMISSIONS),
742 fsparam_flag ("allow_other", OPT_ALLOW_OTHER),
743 fsparam_u32 ("max_read", OPT_MAX_READ),
744 fsparam_u32 ("blksize", OPT_BLKSIZE),
745 fsparam_string ("subtype", OPT_SUBTYPE),
746 {}
747 };
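/*
 * Example (assumed, typical of what a libfuse-based daemon passes at mount
 * time; the exact string depends on the userspace library): an option string
 * such as "fd=4,rootmode=40000,user_id=1000,group_id=1000,default_permissions"
 * is parsed by fuse_parse_param() below into OPT_FD, OPT_ROOTMODE (octal),
 * OPT_USER_ID, OPT_GROUP_ID and OPT_DEFAULT_PERMISSIONS respectively.
 */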
748
749 static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
750 {
751 struct fs_parse_result result;
752 struct fuse_fs_context *ctx = fsc->fs_private;
753 int opt;
754 kuid_t kuid;
755 kgid_t kgid;
756
757 if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
758 /*
759 * Ignore options coming from mount(MS_REMOUNT) for backward
760 * compatibility.
761 */
762 if (fsc->oldapi)
763 return 0;
764
765 return invalfc(fsc, "No changes allowed in reconfigure");
766 }
767
768 opt = fs_parse(fsc, fuse_fs_parameters, param, &result);
769 if (opt < 0)
770 return opt;
771
772 switch (opt) {
773 case OPT_SOURCE:
774 if (fsc->source)
775 return invalfc(fsc, "Multiple sources specified");
776 fsc->source = param->string;
777 param->string = NULL;
778 break;
779
780 case OPT_SUBTYPE:
781 if (ctx->subtype)
782 return invalfc(fsc, "Multiple subtypes specified");
783 ctx->subtype = param->string;
784 param->string = NULL;
785 return 0;
786
787 case OPT_FD:
788 ctx->fd = result.uint_32;
789 ctx->fd_present = true;
790 break;
791
792 case OPT_ROOTMODE:
793 if (!fuse_valid_type(result.uint_32))
794 return invalfc(fsc, "Invalid rootmode");
795 ctx->rootmode = result.uint_32;
796 ctx->rootmode_present = true;
797 break;
798
799 case OPT_USER_ID:
800 kuid = make_kuid(fsc->user_ns, result.uint_32);
801 if (!uid_valid(kuid))
802 return invalfc(fsc, "Invalid user_id");
803 /*
804 * The requested uid must be representable in the
805 * filesystem's idmapping.
806 */
807 if (!kuid_has_mapping(fsc->user_ns, kuid))
808 return invalfc(fsc, "Invalid user_id");
809 ctx->user_id = kuid;
810 ctx->user_id_present = true;
811 break;
812
813 case OPT_GROUP_ID:
814 kgid = make_kgid(fsc->user_ns, result.uint_32);
815 if (!gid_valid(kgid))
816 return invalfc(fsc, "Invalid group_id");
817 /*
818 * The requested gid must be representable in the
819 * filesystem's idmapping.
820 */
821 if (!kgid_has_mapping(fsc->user_ns, kgid))
822 return invalfc(fsc, "Invalid group_id");
823 ctx->group_id = kgid;
824 ctx->group_id_present = true;
825 break;
826
827 case OPT_DEFAULT_PERMISSIONS:
828 ctx->default_permissions = true;
829 break;
830
831 case OPT_ALLOW_OTHER:
832 ctx->allow_other = true;
833 break;
834
835 case OPT_MAX_READ:
836 ctx->max_read = result.uint_32;
837 break;
838
839 case OPT_BLKSIZE:
840 if (!ctx->is_bdev)
841 return invalfc(fsc, "blksize only supported for fuseblk");
842 ctx->blksize = result.uint_32;
843 break;
844
845 default:
846 return -EINVAL;
847 }
848
849 return 0;
850 }
851
852 static void fuse_free_fsc(struct fs_context *fsc)
853 {
854 struct fuse_fs_context *ctx = fsc->fs_private;
855
856 if (ctx) {
857 kfree(ctx->subtype);
858 kfree(ctx);
859 }
860 }
861
862 static int fuse_show_options(struct seq_file *m, struct dentry *root)
863 {
864 struct super_block *sb = root->d_sb;
865 struct fuse_conn *fc = get_fuse_conn_super(sb);
866
867 if (fc->legacy_opts_show) {
868 seq_printf(m, ",user_id=%u",
869 from_kuid_munged(fc->user_ns, fc->user_id));
870 seq_printf(m, ",group_id=%u",
871 from_kgid_munged(fc->user_ns, fc->group_id));
872 if (fc->default_permissions)
873 seq_puts(m, ",default_permissions");
874 if (fc->allow_other)
875 seq_puts(m, ",allow_other");
876 if (fc->max_read != ~0)
877 seq_printf(m, ",max_read=%u", fc->max_read);
878 if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
879 seq_printf(m, ",blksize=%lu", sb->s_blocksize);
880 }
881 #ifdef CONFIG_FUSE_DAX
882 if (fc->dax_mode == FUSE_DAX_ALWAYS)
883 seq_puts(m, ",dax=always");
884 else if (fc->dax_mode == FUSE_DAX_NEVER)
885 seq_puts(m, ",dax=never");
886 else if (fc->dax_mode == FUSE_DAX_INODE_USER)
887 seq_puts(m, ",dax=inode");
888 #endif
889
890 return 0;
891 }
892
893 static void fuse_iqueue_init(struct fuse_iqueue *fiq,
894 const struct fuse_iqueue_ops *ops,
895 void *priv)
896 {
897 memset(fiq, 0, sizeof(struct fuse_iqueue));
898 spin_lock_init(&fiq->lock);
899 init_waitqueue_head(&fiq->waitq);
900 INIT_LIST_HEAD(&fiq->pending);
901 INIT_LIST_HEAD(&fiq->interrupts);
902 fiq->forget_list_tail = &fiq->forget_list_head;
903 fiq->connected = 1;
904 fiq->ops = ops;
905 fiq->priv = priv;
906 }
907
908 static void fuse_pqueue_init(struct fuse_pqueue *fpq)
909 {
910 unsigned int i;
911
912 spin_lock_init(&fpq->lock);
913 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
914 INIT_LIST_HEAD(&fpq->processing[i]);
915 INIT_LIST_HEAD(&fpq->io);
916 fpq->connected = 1;
917 }
918
919 void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
920 struct user_namespace *user_ns,
921 const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
922 {
923 memset(fc, 0, sizeof(*fc));
924 spin_lock_init(&fc->lock);
925 spin_lock_init(&fc->bg_lock);
926 init_rwsem(&fc->killsb);
927 refcount_set(&fc->count, 1);
928 atomic_set(&fc->dev_count, 1);
929 init_waitqueue_head(&fc->blocked_waitq);
930 fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
931 INIT_LIST_HEAD(&fc->bg_queue);
932 INIT_LIST_HEAD(&fc->entry);
933 INIT_LIST_HEAD(&fc->devices);
934 atomic_set(&fc->num_waiting, 0);
935 fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
936 fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
937 atomic64_set(&fc->khctr, 0);
938 fc->polled_files = RB_ROOT;
939 fc->blocked = 0;
940 fc->initialized = 0;
941 fc->connected = 1;
942 atomic64_set(&fc->attr_version, 1);
943 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
944 fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
945 fc->user_ns = get_user_ns(user_ns);
946 fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
947 fc->max_pages_limit = FUSE_MAX_MAX_PAGES;
948
949 INIT_LIST_HEAD(&fc->mounts);
950 list_add(&fm->fc_entry, &fc->mounts);
951 fm->fc = fc;
952 }
953 EXPORT_SYMBOL_GPL(fuse_conn_init);
954
955 static void delayed_release(struct rcu_head *p)
956 {
957 struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);
958
959 put_user_ns(fc->user_ns);
960 fc->release(fc);
961 }
962
963 void fuse_conn_put(struct fuse_conn *fc)
964 {
965 if (refcount_dec_and_test(&fc->count)) {
966 struct fuse_iqueue *fiq = &fc->iq;
967 struct fuse_sync_bucket *bucket;
968
969 if (IS_ENABLED(CONFIG_FUSE_DAX))
970 fuse_dax_conn_free(fc);
971 if (fiq->ops->release)
972 fiq->ops->release(fiq);
973 put_pid_ns(fc->pid_ns);
974 bucket = rcu_dereference_protected(fc->curr_bucket, 1);
975 if (bucket) {
976 WARN_ON(atomic_read(&bucket->count) != 1);
977 kfree(bucket);
978 }
979 call_rcu(&fc->rcu, delayed_release);
980 }
981 }
982 EXPORT_SYMBOL_GPL(fuse_conn_put);
983
984 struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
985 {
986 refcount_inc(&fc->count);
987 return fc;
988 }
989 EXPORT_SYMBOL_GPL(fuse_conn_get);
990
991 static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
992 {
993 struct fuse_attr attr;
994 memset(&attr, 0, sizeof(attr));
995
996 attr.mode = mode;
997 attr.ino = FUSE_ROOT_ID;
998 attr.nlink = 1;
999 return fuse_iget(sb, 1, 0, &attr, 0, 0);
1000 }
1001
1002 struct fuse_inode_handle {
1003 u64 nodeid;
1004 u32 generation;
1005 };
1006
1007 static struct dentry *fuse_get_dentry(struct super_block *sb,
1008 struct fuse_inode_handle *handle)
1009 {
1010 struct fuse_conn *fc = get_fuse_conn_super(sb);
1011 struct inode *inode;
1012 struct dentry *entry;
1013 int err = -ESTALE;
1014
1015 if (handle->nodeid == 0)
1016 goto out_err;
1017
1018 inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
1019 if (!inode) {
1020 struct fuse_entry_out outarg;
1021 const struct qstr name = QSTR_INIT(".", 1);
1022
1023 if (!fc->export_support)
1024 goto out_err;
1025
1026 err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
1027 &inode);
1028 if (err && err != -ENOENT)
1029 goto out_err;
1030 if (err || !inode) {
1031 err = -ESTALE;
1032 goto out_err;
1033 }
1034 err = -EIO;
1035 if (get_node_id(inode) != handle->nodeid)
1036 goto out_iput;
1037 }
1038 err = -ESTALE;
1039 if (inode->i_generation != handle->generation)
1040 goto out_iput;
1041
1042 entry = d_obtain_alias(inode);
1043 if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
1044 fuse_invalidate_entry_cache(entry);
1045
1046 return entry;
1047
1048 out_iput:
1049 iput(inode);
1050 out_err:
1051 return ERR_PTR(err);
1052 }
1053
1054 static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
1055 struct inode *parent)
1056 {
1057 int len = parent ? 6 : 3;
1058 u64 nodeid;
1059 u32 generation;
1060
1061 if (*max_len < len) {
1062 *max_len = len;
1063 return FILEID_INVALID;
1064 }
1065
1066 nodeid = get_fuse_inode(inode)->nodeid;
1067 generation = inode->i_generation;
1068
1069 fh[0] = (u32)(nodeid >> 32);
1070 fh[1] = (u32)(nodeid & 0xffffffff);
1071 fh[2] = generation;
1072
1073 if (parent) {
1074 nodeid = get_fuse_inode(parent)->nodeid;
1075 generation = parent->i_generation;
1076
1077 fh[3] = (u32)(nodeid >> 32);
1078 fh[4] = (u32)(nodeid & 0xffffffff);
1079 fh[5] = generation;
1080 }
1081
1082 *max_len = len;
1083 return parent ? 0x82 : 0x81;
1084 }
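/*
 * Example (illustrative): a nodeid of 0x0000000100000002 with generation 7
 * is encoded above as fh[] = { 0x00000001, 0x00000002, 7 } with file handle
 * type 0x81; when a parent is supplied the same triple is repeated in
 * fh[3..5] and the type is 0x82.
 */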
1085
1086 static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
1087 struct fid *fid, int fh_len, int fh_type)
1088 {
1089 struct fuse_inode_handle handle;
1090
1091 if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
1092 return NULL;
1093
1094 handle.nodeid = (u64) fid->raw[0] << 32;
1095 handle.nodeid |= (u64) fid->raw[1];
1096 handle.generation = fid->raw[2];
1097 return fuse_get_dentry(sb, &handle);
1098 }
1099
1100 static struct dentry *fuse_fh_to_parent(struct super_block *sb,
1101 struct fid *fid, int fh_len, int fh_type)
1102 {
1103 struct fuse_inode_handle parent;
1104
1105 if (fh_type != 0x82 || fh_len < 6)
1106 return NULL;
1107
1108 parent.nodeid = (u64) fid->raw[3] << 32;
1109 parent.nodeid |= (u64) fid->raw[4];
1110 parent.generation = fid->raw[5];
1111 return fuse_get_dentry(sb, &parent);
1112 }
1113
1114 static struct dentry *fuse_get_parent(struct dentry *child)
1115 {
1116 struct inode *child_inode = d_inode(child);
1117 struct fuse_conn *fc = get_fuse_conn(child_inode);
1118 struct inode *inode;
1119 struct dentry *parent;
1120 struct fuse_entry_out outarg;
1121 int err;
1122
1123 if (!fc->export_support)
1124 return ERR_PTR(-ESTALE);
1125
1126 err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
1127 &dotdot_name, &outarg, &inode);
1128 if (err) {
1129 if (err == -ENOENT)
1130 return ERR_PTR(-ESTALE);
1131 return ERR_PTR(err);
1132 }
1133
1134 parent = d_obtain_alias(inode);
1135 if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
1136 fuse_invalidate_entry_cache(parent);
1137
1138 return parent;
1139 }
1140
1141 static const struct export_operations fuse_export_operations = {
1142 .fh_to_dentry = fuse_fh_to_dentry,
1143 .fh_to_parent = fuse_fh_to_parent,
1144 .encode_fh = fuse_encode_fh,
1145 .get_parent = fuse_get_parent,
1146 };
1147
1148 static const struct super_operations fuse_super_operations = {
1149 .alloc_inode = fuse_alloc_inode,
1150 .free_inode = fuse_free_inode,
1151 .evict_inode = fuse_evict_inode,
1152 .write_inode = fuse_write_inode,
1153 .drop_inode = generic_delete_inode,
1154 .umount_begin = fuse_umount_begin,
1155 .statfs = fuse_statfs,
1156 .sync_fs = fuse_sync_fs,
1157 .show_options = fuse_show_options,
1158 };
1159
1160 static void sanitize_global_limit(unsigned *limit)
1161 {
1162 /*
1163 * The default maximum number of async requests is calculated to consume
1164 * 1/2^13 of the total memory, assuming 392 bytes per request.
1165 */
1166 if (*limit == 0)
1167 *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;
1168
1169 if (*limit >= 1 << 16)
1170 *limit = (1 << 16) - 1;
1171 }
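/*
 * Worked example (illustrative): on a machine with 4 GiB of RAM the default
 * becomes ((4 << 30) >> 13) / 392 = 524288 / 392 ~= 1337 requests; whatever
 * the result, the limit is always clamped below 2^16.
 */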
1172
1173 static int set_global_limit(const char *val, const struct kernel_param *kp)
1174 {
1175 int rv;
1176
1177 rv = param_set_uint(val, kp);
1178 if (rv)
1179 return rv;
1180
1181 sanitize_global_limit((unsigned *)kp->arg);
1182
1183 return 0;
1184 }
1185
1186 static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
1187 {
1188 int cap_sys_admin = capable(CAP_SYS_ADMIN);
1189
1190 if (arg->minor < 13)
1191 return;
1192
1193 sanitize_global_limit(&max_user_bgreq);
1194 sanitize_global_limit(&max_user_congthresh);
1195
1196 spin_lock(&fc->bg_lock);
1197 if (arg->max_background) {
1198 fc->max_background = arg->max_background;
1199
1200 if (!cap_sys_admin && fc->max_background > max_user_bgreq)
1201 fc->max_background = max_user_bgreq;
1202 }
1203 if (arg->congestion_threshold) {
1204 fc->congestion_threshold = arg->congestion_threshold;
1205
1206 if (!cap_sys_admin &&
1207 fc->congestion_threshold > max_user_congthresh)
1208 fc->congestion_threshold = max_user_congthresh;
1209 }
1210 spin_unlock(&fc->bg_lock);
1211 }
1212
1213 struct fuse_init_args {
1214 struct fuse_args args;
1215 struct fuse_init_in in;
1216 struct fuse_init_out out;
1217 };
1218
1219 static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
1220 int error)
1221 {
1222 struct fuse_conn *fc = fm->fc;
1223 struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
1224 struct fuse_init_out *arg = &ia->out;
1225 bool ok = true;
1226
1227 if (error || arg->major != FUSE_KERNEL_VERSION)
1228 ok = false;
1229 else {
1230 unsigned long ra_pages;
1231
1232 process_init_limits(fc, arg);
1233
1234 if (arg->minor >= 6) {
1235 u64 flags = arg->flags;
1236
1237 if (flags & FUSE_INIT_EXT)
1238 flags |= (u64) arg->flags2 << 32;
1239
1240 ra_pages = arg->max_readahead / PAGE_SIZE;
1241 if (flags & FUSE_ASYNC_READ)
1242 fc->async_read = 1;
1243 if (!(flags & FUSE_POSIX_LOCKS))
1244 fc->no_lock = 1;
1245 if (arg->minor >= 17) {
1246 if (!(flags & FUSE_FLOCK_LOCKS))
1247 fc->no_flock = 1;
1248 } else {
1249 if (!(flags & FUSE_POSIX_LOCKS))
1250 fc->no_flock = 1;
1251 }
1252 if (flags & FUSE_ATOMIC_O_TRUNC)
1253 fc->atomic_o_trunc = 1;
1254 if (arg->minor >= 9) {
1255 /* LOOKUP has dependency on proto version */
1256 if (flags & FUSE_EXPORT_SUPPORT)
1257 fc->export_support = 1;
1258 }
1259 if (flags & FUSE_BIG_WRITES)
1260 fc->big_writes = 1;
1261 if (flags & FUSE_DONT_MASK)
1262 fc->dont_mask = 1;
1263 if (flags & FUSE_AUTO_INVAL_DATA)
1264 fc->auto_inval_data = 1;
1265 else if (flags & FUSE_EXPLICIT_INVAL_DATA)
1266 fc->explicit_inval_data = 1;
1267 if (flags & FUSE_DO_READDIRPLUS) {
1268 fc->do_readdirplus = 1;
1269 if (flags & FUSE_READDIRPLUS_AUTO)
1270 fc->readdirplus_auto = 1;
1271 }
1272 if (flags & FUSE_ASYNC_DIO)
1273 fc->async_dio = 1;
1274 if (flags & FUSE_WRITEBACK_CACHE)
1275 fc->writeback_cache = 1;
1276 if (flags & FUSE_PARALLEL_DIROPS)
1277 fc->parallel_dirops = 1;
1278 if (flags & FUSE_HANDLE_KILLPRIV)
1279 fc->handle_killpriv = 1;
1280 if (arg->time_gran && arg->time_gran <= 1000000000)
1281 fm->sb->s_time_gran = arg->time_gran;
1282 if ((flags & FUSE_POSIX_ACL)) {
1283 fc->default_permissions = 1;
1284 fc->posix_acl = 1;
1285 }
1286 if (flags & FUSE_CACHE_SYMLINKS)
1287 fc->cache_symlinks = 1;
1288 if (flags & FUSE_ABORT_ERROR)
1289 fc->abort_err = 1;
1290 if (flags & FUSE_MAX_PAGES) {
1291 fc->max_pages =
1292 min_t(unsigned int, fc->max_pages_limit,
1293 max_t(unsigned int, arg->max_pages, 1));
1294 }
1295 if (IS_ENABLED(CONFIG_FUSE_DAX)) {
1296 if (flags & FUSE_MAP_ALIGNMENT &&
1297 !fuse_dax_check_alignment(fc, arg->map_alignment)) {
1298 ok = false;
1299 }
1300 if (flags & FUSE_HAS_INODE_DAX)
1301 fc->inode_dax = 1;
1302 }
1303 if (flags & FUSE_HANDLE_KILLPRIV_V2) {
1304 fc->handle_killpriv_v2 = 1;
1305 fm->sb->s_flags |= SB_NOSEC;
1306 }
1307 if (flags & FUSE_SETXATTR_EXT)
1308 fc->setxattr_ext = 1;
1309 if (flags & FUSE_SECURITY_CTX)
1310 fc->init_security = 1;
1311 if (flags & FUSE_CREATE_SUPP_GROUP)
1312 fc->create_supp_group = 1;
1313 if (flags & FUSE_DIRECT_IO_ALLOW_MMAP)
1314 fc->direct_io_allow_mmap = 1;
1315 } else {
1316 ra_pages = fc->max_read / PAGE_SIZE;
1317 fc->no_lock = 1;
1318 fc->no_flock = 1;
1319 }
1320
1321 fm->sb->s_bdi->ra_pages =
1322 min(fm->sb->s_bdi->ra_pages, ra_pages);
1323 fc->minor = arg->minor;
1324 fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
1325 fc->max_write = max_t(unsigned, 4096, fc->max_write);
1326 fc->conn_init = 1;
1327 }
1328 kfree(ia);
1329
1330 if (!ok) {
1331 fc->conn_init = 0;
1332 fc->conn_error = 1;
1333 }
1334
1335 fuse_set_initialized(fc);
1336 wake_up_all(&fc->blocked_waitq);
1337 }
1338
1339 void fuse_send_init(struct fuse_mount *fm)
1340 {
1341 struct fuse_init_args *ia;
1342 u64 flags;
1343
1344 ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);
1345
1346 ia->in.major = FUSE_KERNEL_VERSION;
1347 ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
1348 ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
1349 flags =
1350 FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
1351 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
1352 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
1353 FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
1354 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
1355 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
1356 FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
1357 FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
1358 FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
1359 FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
1360 FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
1361 FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP;
1362 #ifdef CONFIG_FUSE_DAX
1363 if (fm->fc->dax)
1364 flags |= FUSE_MAP_ALIGNMENT;
1365 if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
1366 flags |= FUSE_HAS_INODE_DAX;
1367 #endif
1368 if (fm->fc->auto_submounts)
1369 flags |= FUSE_SUBMOUNTS;
1370
1371 ia->in.flags = flags;
1372 ia->in.flags2 = flags >> 32;
1373
1374 ia->args.opcode = FUSE_INIT;
1375 ia->args.in_numargs = 1;
1376 ia->args.in_args[0].size = sizeof(ia->in);
1377 ia->args.in_args[0].value = &ia->in;
1378 ia->args.out_numargs = 1;
1379 /* Variable length argument used for backward compatibility
1380 with interface version < 7.5. Rest of init_out is zeroed
1381 by do_get_request(), so a short reply is not a problem */
1382 ia->args.out_argvar = true;
1383 ia->args.out_args[0].size = sizeof(ia->out);
1384 ia->args.out_args[0].value = &ia->out;
1385 ia->args.force = true;
1386 ia->args.nocreds = true;
1387 ia->args.end = process_init_reply;
1388
1389 if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
1390 process_init_reply(fm, &ia->args, -ENOTCONN);
1391 }
1392 EXPORT_SYMBOL_GPL(fuse_send_init);
1393
1394 void fuse_free_conn(struct fuse_conn *fc)
1395 {
1396 WARN_ON(!list_empty(&fc->devices));
1397 kfree(fc);
1398 }
1399 EXPORT_SYMBOL_GPL(fuse_free_conn);
1400
1401 static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
1402 {
1403 int err;
1404 char *suffix = "";
1405
1406 if (sb->s_bdev) {
1407 suffix = "-fuseblk";
1408 /*
1409 * sb->s_bdi points to the blkdev's bdi; however, we want to redirect
1410 * it to our private bdi...
1411 */
1412 bdi_put(sb->s_bdi);
1413 sb->s_bdi = &noop_backing_dev_info;
1414 }
1415 err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
1416 MINOR(fc->dev), suffix);
1417 if (err)
1418 return err;
1419
1420 /* fuse does its own writeback accounting */
1421 sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
1422 sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;
1423
1424 /*
1425 * For a single fuse filesystem use max 1% of dirty +
1426 * writeback threshold.
1427 *
1428 * This gives about 1M of write buffer for memory maps on a
1429 * machine with 1G and 10% dirty_ratio, which should be more
1430 * than enough.
1431 *
1432 * Privileged users can raise it by writing to
1433 *
1434 * /sys/class/bdi/<bdi>/max_ratio
1435 */
1436 bdi_set_max_ratio(sb->s_bdi, 1);
1437
1438 return 0;
1439 }
1440
1441 struct fuse_dev *fuse_dev_alloc(void)
1442 {
1443 struct fuse_dev *fud;
1444 struct list_head *pq;
1445
1446 fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
1447 if (!fud)
1448 return NULL;
1449
1450 pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
1451 if (!pq) {
1452 kfree(fud);
1453 return NULL;
1454 }
1455
1456 fud->pq.processing = pq;
1457 fuse_pqueue_init(&fud->pq);
1458
1459 return fud;
1460 }
1461 EXPORT_SYMBOL_GPL(fuse_dev_alloc);
1462
1463 void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
1464 {
1465 fud->fc = fuse_conn_get(fc);
1466 spin_lock(&fc->lock);
1467 list_add_tail(&fud->entry, &fc->devices);
1468 spin_unlock(&fc->lock);
1469 }
1470 EXPORT_SYMBOL_GPL(fuse_dev_install);
1471
1472 struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
1473 {
1474 struct fuse_dev *fud;
1475
1476 fud = fuse_dev_alloc();
1477 if (!fud)
1478 return NULL;
1479
1480 fuse_dev_install(fud, fc);
1481 return fud;
1482 }
1483 EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);
1484
1485 void fuse_dev_free(struct fuse_dev *fud)
1486 {
1487 struct fuse_conn *fc = fud->fc;
1488
1489 if (fc) {
1490 spin_lock(&fc->lock);
1491 list_del(&fud->entry);
1492 spin_unlock(&fc->lock);
1493
1494 fuse_conn_put(fc);
1495 }
1496 kfree(fud->pq.processing);
1497 kfree(fud);
1498 }
1499 EXPORT_SYMBOL_GPL(fuse_dev_free);
1500
1501 static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
1502 const struct fuse_inode *fi)
1503 {
1504 struct timespec64 ctime = inode_get_ctime(&fi->inode);
1505
1506 *attr = (struct fuse_attr){
1507 .ino = fi->inode.i_ino,
1508 .size = fi->inode.i_size,
1509 .blocks = fi->inode.i_blocks,
1510 .atime = fi->inode.i_atime.tv_sec,
1511 .mtime = fi->inode.i_mtime.tv_sec,
1512 .ctime = ctime.tv_sec,
1513 .atimensec = fi->inode.i_atime.tv_nsec,
1514 .mtimensec = fi->inode.i_mtime.tv_nsec,
1515 .ctimensec = ctime.tv_nsec,
1516 .mode = fi->inode.i_mode,
1517 .nlink = fi->inode.i_nlink,
1518 .uid = fi->inode.i_uid.val,
1519 .gid = fi->inode.i_gid.val,
1520 .rdev = fi->inode.i_rdev,
1521 .blksize = 1u << fi->inode.i_blkbits,
1522 };
1523 }
1524
1525 static void fuse_sb_defaults(struct super_block *sb)
1526 {
1527 sb->s_magic = FUSE_SUPER_MAGIC;
1528 sb->s_op = &fuse_super_operations;
1529 sb->s_xattr = fuse_xattr_handlers;
1530 sb->s_maxbytes = MAX_LFS_FILESIZE;
1531 sb->s_time_gran = 1;
1532 sb->s_export_op = &fuse_export_operations;
1533 sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
1534 if (sb->s_user_ns != &init_user_ns)
1535 sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
1536 sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
1537 }
1538
1539 static int fuse_fill_super_submount(struct super_block *sb,
1540 struct fuse_inode *parent_fi)
1541 {
1542 struct fuse_mount *fm = get_fuse_mount_super(sb);
1543 struct super_block *parent_sb = parent_fi->inode.i_sb;
1544 struct fuse_attr root_attr;
1545 struct inode *root;
1546 struct fuse_submount_lookup *sl;
1547 struct fuse_inode *fi;
1548
1549 fuse_sb_defaults(sb);
1550 fm->sb = sb;
1551
1552 WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1553 sb->s_bdi = bdi_get(parent_sb->s_bdi);
1554
1555 sb->s_xattr = parent_sb->s_xattr;
1556 sb->s_time_gran = parent_sb->s_time_gran;
1557 sb->s_blocksize = parent_sb->s_blocksize;
1558 sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
1559 sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL);
1560 if (parent_sb->s_subtype && !sb->s_subtype)
1561 return -ENOMEM;
1562
1563 fuse_fill_attr_from_inode(&root_attr, parent_fi);
1564 root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0);
1565 /*
1566 * This inode is just a duplicate, so it is not looked up and
1567 * its nlookup should not be incremented. fuse_iget() does
1568 * that, though, so undo it here.
1569 */
1570 fi = get_fuse_inode(root);
1571 fi->nlookup--;
1572
1573 sb->s_d_op = &fuse_dentry_operations;
1574 sb->s_root = d_make_root(root);
1575 if (!sb->s_root)
1576 return -ENOMEM;
1577
1578 /*
1579 * Grab the parent's submount_lookup pointer and take a
1580 * reference on the shared nlookup from the parent. This is to
1581 * prevent the last forget for this nodeid from getting
1582 * triggered until all users have finished with it.
1583 */
1584 sl = parent_fi->submount_lookup;
1585 WARN_ON(!sl);
1586 if (sl) {
1587 refcount_inc(&sl->count);
1588 fi->submount_lookup = sl;
1589 }
1590
1591 return 0;
1592 }
1593
1594 /* Filesystem context private data holds the FUSE inode of the mount point */
1595 static int fuse_get_tree_submount(struct fs_context *fsc)
1596 {
1597 struct fuse_mount *fm;
1598 struct fuse_inode *mp_fi = fsc->fs_private;
1599 struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode);
1600 struct super_block *sb;
1601 int err;
1602
1603 fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
1604 if (!fm)
1605 return -ENOMEM;
1606
1607 fm->fc = fuse_conn_get(fc);
1608 fsc->s_fs_info = fm;
1609 sb = sget_fc(fsc, NULL, set_anon_super_fc);
1610 if (fsc->s_fs_info)
1611 fuse_mount_destroy(fm);
1612 if (IS_ERR(sb))
1613 return PTR_ERR(sb);
1614
1615 /* Initialize superblock, making @mp_fi its root */
1616 err = fuse_fill_super_submount(sb, mp_fi);
1617 if (err) {
1618 deactivate_locked_super(sb);
1619 return err;
1620 }
1621
1622 down_write(&fc->killsb);
1623 list_add_tail(&fm->fc_entry, &fc->mounts);
1624 up_write(&fc->killsb);
1625
1626 sb->s_flags |= SB_ACTIVE;
1627 fsc->root = dget(sb->s_root);
1628
1629 return 0;
1630 }
1631
1632 static const struct fs_context_operations fuse_context_submount_ops = {
1633 .get_tree = fuse_get_tree_submount,
1634 };
1635
1636 int fuse_init_fs_context_submount(struct fs_context *fsc)
1637 {
1638 fsc->ops = &fuse_context_submount_ops;
1639 return 0;
1640 }
1641 EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount);
1642
1643 int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
1644 {
1645 struct fuse_dev *fud = NULL;
1646 struct fuse_mount *fm = get_fuse_mount_super(sb);
1647 struct fuse_conn *fc = fm->fc;
1648 struct inode *root;
1649 struct dentry *root_dentry;
1650 int err;
1651
1652 err = -EINVAL;
1653 if (sb->s_flags & SB_MANDLOCK)
1654 goto err;
1655
1656 rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
1657 fuse_sb_defaults(sb);
1658
1659 if (ctx->is_bdev) {
1660 #ifdef CONFIG_BLOCK
1661 err = -EINVAL;
1662 if (!sb_set_blocksize(sb, ctx->blksize))
1663 goto err;
1664 #endif
1665 } else {
1666 sb->s_blocksize = PAGE_SIZE;
1667 sb->s_blocksize_bits = PAGE_SHIFT;
1668 }
1669
1670 sb->s_subtype = ctx->subtype;
1671 ctx->subtype = NULL;
1672 if (IS_ENABLED(CONFIG_FUSE_DAX)) {
1673 err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
1674 if (err)
1675 goto err;
1676 }
1677
1678 if (ctx->fudptr) {
1679 err = -ENOMEM;
1680 fud = fuse_dev_alloc_install(fc);
1681 if (!fud)
1682 goto err_free_dax;
1683 }
1684
1685 fc->dev = sb->s_dev;
1686 fm->sb = sb;
1687 err = fuse_bdi_init(fc, sb);
1688 if (err)
1689 goto err_dev_free;
1690
1691 /* Handle umasking inside the fuse code */
1692 if (sb->s_flags & SB_POSIXACL)
1693 fc->dont_mask = 1;
1694 sb->s_flags |= SB_POSIXACL;
1695
1696 fc->default_permissions = ctx->default_permissions;
1697 fc->allow_other = ctx->allow_other;
1698 fc->user_id = ctx->user_id;
1699 fc->group_id = ctx->group_id;
1700 fc->legacy_opts_show = ctx->legacy_opts_show;
1701 fc->max_read = max_t(unsigned int, 4096, ctx->max_read);
1702 fc->destroy = ctx->destroy;
1703 fc->no_control = ctx->no_control;
1704 fc->no_force_umount = ctx->no_force_umount;
1705
1706 err = -ENOMEM;
1707 root = fuse_get_root_inode(sb, ctx->rootmode);
1708 sb->s_d_op = &fuse_root_dentry_operations;
1709 root_dentry = d_make_root(root);
1710 if (!root_dentry)
1711 goto err_dev_free;
1712 /* Root dentry doesn't have .d_revalidate */
1713 sb->s_d_op = &fuse_dentry_operations;
1714
1715 mutex_lock(&fuse_mutex);
1716 err = -EINVAL;
1717 if (ctx->fudptr && *ctx->fudptr)
1718 goto err_unlock;
1719
1720 err = fuse_ctl_add_conn(fc);
1721 if (err)
1722 goto err_unlock;
1723
1724 list_add_tail(&fc->entry, &fuse_conn_list);
1725 sb->s_root = root_dentry;
1726 if (ctx->fudptr)
1727 *ctx->fudptr = fud;
1728 mutex_unlock(&fuse_mutex);
1729 return 0;
1730
1731 err_unlock:
1732 mutex_unlock(&fuse_mutex);
1733 dput(root_dentry);
1734 err_dev_free:
1735 if (fud)
1736 fuse_dev_free(fud);
1737 err_free_dax:
1738 if (IS_ENABLED(CONFIG_FUSE_DAX))
1739 fuse_dax_conn_free(fc);
1740 err:
1741 return err;
1742 }
1743 EXPORT_SYMBOL_GPL(fuse_fill_super_common);
1744
1745 static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
1746 {
1747 struct fuse_fs_context *ctx = fsc->fs_private;
1748 int err;
1749
1750 if (!ctx->file || !ctx->rootmode_present ||
1751 !ctx->user_id_present || !ctx->group_id_present)
1752 return -EINVAL;
1753
1754 /*
1755 * Require mount to happen from the same user namespace which
1756 * opened /dev/fuse to prevent potential attacks.
1757 */
1758 if ((ctx->file->f_op != &fuse_dev_operations) ||
1759 (ctx->file->f_cred->user_ns != sb->s_user_ns))
1760 return -EINVAL;
1761 ctx->fudptr = &ctx->file->private_data;
1762
1763 err = fuse_fill_super_common(sb, ctx);
1764 if (err)
1765 return err;
1766 /* file->private_data shall be visible on all CPUs after this */
1767 smp_mb();
1768 fuse_send_init(get_fuse_mount_super(sb));
1769 return 0;
1770 }
1771
1772 /*
1773 * This is the path where user supplied an already initialized fuse dev. In
1774 * this case never create a new super if the old one is gone.
1775 */
1776 static int fuse_set_no_super(struct super_block *sb, struct fs_context *fsc)
1777 {
1778 return -ENOTCONN;
1779 }
1780
1781 static int fuse_test_super(struct super_block *sb, struct fs_context *fsc)
1782 {
1783
1784 return fsc->sget_key == get_fuse_conn_super(sb);
1785 }
1786
1787 static int fuse_get_tree(struct fs_context *fsc)
1788 {
1789 struct fuse_fs_context *ctx = fsc->fs_private;
1790 struct fuse_dev *fud;
1791 struct fuse_conn *fc;
1792 struct fuse_mount *fm;
1793 struct super_block *sb;
1794 int err;
1795
1796 fc = kmalloc(sizeof(*fc), GFP_KERNEL);
1797 if (!fc)
1798 return -ENOMEM;
1799
1800 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1801 if (!fm) {
1802 kfree(fc);
1803 return -ENOMEM;
1804 }
1805
1806 fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
1807 fc->release = fuse_free_conn;
1808
1809 fsc->s_fs_info = fm;
1810
1811 if (ctx->fd_present)
1812 ctx->file = fget(ctx->fd);
1813
1814 if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
1815 err = get_tree_bdev(fsc, fuse_fill_super);
1816 goto out;
1817 }
1818 /*
1819 * While a block dev mount can be initialized with a dummy device fd
1820 * (the device is found by name), normal fuse mounts can't.
1821 */
1822 err = -EINVAL;
1823 if (!ctx->file)
1824 goto out;
1825
1826 /*
1827 * Allow creating a fuse mount with an already initialized fuse
1828 * connection
1829 */
1830 fud = READ_ONCE(ctx->file->private_data);
1831 if (ctx->file->f_op == &fuse_dev_operations && fud) {
1832 fsc->sget_key = fud->fc;
1833 sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
1834 err = PTR_ERR_OR_ZERO(sb);
1835 if (!IS_ERR(sb))
1836 fsc->root = dget(sb->s_root);
1837 } else {
1838 err = get_tree_nodev(fsc, fuse_fill_super);
1839 }
1840 out:
1841 if (fsc->s_fs_info)
1842 fuse_mount_destroy(fm);
1843 if (ctx->file)
1844 fput(ctx->file);
1845 return err;
1846 }
1847
1848 static const struct fs_context_operations fuse_context_ops = {
1849 .free = fuse_free_fsc,
1850 .parse_param = fuse_parse_param,
1851 .reconfigure = fuse_reconfigure,
1852 .get_tree = fuse_get_tree,
1853 };
1854
1855 /*
1856 * Set up the filesystem mount context.
1857 */
1858 static int fuse_init_fs_context(struct fs_context *fsc)
1859 {
1860 struct fuse_fs_context *ctx;
1861
1862 ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
1863 if (!ctx)
1864 return -ENOMEM;
1865
1866 ctx->max_read = ~0;
1867 ctx->blksize = FUSE_DEFAULT_BLKSIZE;
1868 ctx->legacy_opts_show = true;
1869
1870 #ifdef CONFIG_BLOCK
1871 if (fsc->fs_type == &fuseblk_fs_type) {
1872 ctx->is_bdev = true;
1873 ctx->destroy = true;
1874 }
1875 #endif
1876
1877 fsc->fs_private = ctx;
1878 fsc->ops = &fuse_context_ops;
1879 return 0;
1880 }
1881
1882 bool fuse_mount_remove(struct fuse_mount *fm)
1883 {
1884 struct fuse_conn *fc = fm->fc;
1885 bool last = false;
1886
1887 down_write(&fc->killsb);
1888 list_del_init(&fm->fc_entry);
1889 if (list_empty(&fc->mounts))
1890 last = true;
1891 up_write(&fc->killsb);
1892
1893 return last;
1894 }
1895 EXPORT_SYMBOL_GPL(fuse_mount_remove);
1896
1897 void fuse_conn_destroy(struct fuse_mount *fm)
1898 {
1899 struct fuse_conn *fc = fm->fc;
1900
1901 if (fc->destroy)
1902 fuse_send_destroy(fm);
1903
1904 fuse_abort_conn(fc);
1905 fuse_wait_aborted(fc);
1906
1907 if (!list_empty(&fc->entry)) {
1908 mutex_lock(&fuse_mutex);
1909 list_del(&fc->entry);
1910 fuse_ctl_remove_conn(fc);
1911 mutex_unlock(&fuse_mutex);
1912 }
1913 }
1914 EXPORT_SYMBOL_GPL(fuse_conn_destroy);
1915
1916 static void fuse_sb_destroy(struct super_block *sb)
1917 {
1918 struct fuse_mount *fm = get_fuse_mount_super(sb);
1919 bool last;
1920
1921 if (sb->s_root) {
1922 last = fuse_mount_remove(fm);
1923 if (last)
1924 fuse_conn_destroy(fm);
1925 }
1926 }
1927
1928 void fuse_mount_destroy(struct fuse_mount *fm)
1929 {
1930 fuse_conn_put(fm->fc);
1931 kfree_rcu(fm, rcu);
1932 }
1933 EXPORT_SYMBOL(fuse_mount_destroy);
1934
1935 static void fuse_kill_sb_anon(struct super_block *sb)
1936 {
1937 fuse_sb_destroy(sb);
1938 kill_anon_super(sb);
1939 fuse_mount_destroy(get_fuse_mount_super(sb));
1940 }
1941
1942 static struct file_system_type fuse_fs_type = {
1943 .owner = THIS_MODULE,
1944 .name = "fuse",
1945 .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
1946 .init_fs_context = fuse_init_fs_context,
1947 .parameters = fuse_fs_parameters,
1948 .kill_sb = fuse_kill_sb_anon,
1949 };
1950 MODULE_ALIAS_FS("fuse");
1951
1952 #ifdef CONFIG_BLOCK
1953 static void fuse_kill_sb_blk(struct super_block *sb)
1954 {
1955 fuse_sb_destroy(sb);
1956 kill_block_super(sb);
1957 fuse_mount_destroy(get_fuse_mount_super(sb));
1958 }
1959
1960 static struct file_system_type fuseblk_fs_type = {
1961 .owner = THIS_MODULE,
1962 .name = "fuseblk",
1963 .init_fs_context = fuse_init_fs_context,
1964 .parameters = fuse_fs_parameters,
1965 .kill_sb = fuse_kill_sb_blk,
1966 .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
1967 };
1968 MODULE_ALIAS_FS("fuseblk");
1969
1970 static inline int register_fuseblk(void)
1971 {
1972 return register_filesystem(&fuseblk_fs_type);
1973 }
1974
1975 static inline void unregister_fuseblk(void)
1976 {
1977 unregister_filesystem(&fuseblk_fs_type);
1978 }
1979 #else
1980 static inline int register_fuseblk(void)
1981 {
1982 return 0;
1983 }
1984
1985 static inline void unregister_fuseblk(void)
1986 {
1987 }
1988 #endif
1989
1990 static void fuse_inode_init_once(void *foo)
1991 {
1992 struct inode *inode = foo;
1993
1994 inode_init_once(inode);
1995 }
1996
1997 static int __init fuse_fs_init(void)
1998 {
1999 int err;
2000
2001 fuse_inode_cachep = kmem_cache_create("fuse_inode",
2002 sizeof(struct fuse_inode), 0,
2003 SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
2004 fuse_inode_init_once);
2005 err = -ENOMEM;
2006 if (!fuse_inode_cachep)
2007 goto out;
2008
2009 err = register_fuseblk();
2010 if (err)
2011 goto out2;
2012
2013 err = register_filesystem(&fuse_fs_type);
2014 if (err)
2015 goto out3;
2016
2017 return 0;
2018
2019 out3:
2020 unregister_fuseblk();
2021 out2:
2022 kmem_cache_destroy(fuse_inode_cachep);
2023 out:
2024 return err;
2025 }
2026
2027 static void fuse_fs_cleanup(void)
2028 {
2029 unregister_filesystem(&fuse_fs_type);
2030 unregister_fuseblk();
2031
2032 /*
2033 * Make sure all delayed rcu free inodes are flushed before we
2034 * destroy cache.
2035 */
2036 rcu_barrier();
2037 kmem_cache_destroy(fuse_inode_cachep);
2038 }
2039
2040 static struct kobject *fuse_kobj;
2041
2042 static int fuse_sysfs_init(void)
2043 {
2044 int err;
2045
2046 fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
2047 if (!fuse_kobj) {
2048 err = -ENOMEM;
2049 goto out_err;
2050 }
2051
2052 err = sysfs_create_mount_point(fuse_kobj, "connections");
2053 if (err)
2054 goto out_fuse_unregister;
2055
2056 return 0;
2057
2058 out_fuse_unregister:
2059 kobject_put(fuse_kobj);
2060 out_err:
2061 return err;
2062 }
2063
2064 static void fuse_sysfs_cleanup(void)
2065 {
2066 sysfs_remove_mount_point(fuse_kobj, "connections");
2067 kobject_put(fuse_kobj);
2068 }
2069
2070 static int __init fuse_init(void)
2071 {
2072 int res;
2073
2074 pr_info("init (API version %i.%i)\n",
2075 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2076
2077 INIT_LIST_HEAD(&fuse_conn_list);
2078 res = fuse_fs_init();
2079 if (res)
2080 goto err;
2081
2082 res = fuse_dev_init();
2083 if (res)
2084 goto err_fs_cleanup;
2085
2086 res = fuse_sysfs_init();
2087 if (res)
2088 goto err_dev_cleanup;
2089
2090 res = fuse_ctl_init();
2091 if (res)
2092 goto err_sysfs_cleanup;
2093
2094 sanitize_global_limit(&max_user_bgreq);
2095 sanitize_global_limit(&max_user_congthresh);
2096
2097 return 0;
2098
2099 err_sysfs_cleanup:
2100 fuse_sysfs_cleanup();
2101 err_dev_cleanup:
2102 fuse_dev_cleanup();
2103 err_fs_cleanup:
2104 fuse_fs_cleanup();
2105 err:
2106 return res;
2107 }
2108
2109 static void __exit fuse_exit(void)
2110 {
2111 pr_debug("exit\n");
2112
2113 fuse_ctl_cleanup();
2114 fuse_sysfs_cleanup();
2115 fuse_fs_cleanup();
2116 fuse_dev_cleanup();
2117 }
2118
2119 module_init(fuse_init);
2120 module_exit(fuse_exit);
2121