xref: /openbmc/linux/fs/fuse/dir.c (revision 8fdff1dc)
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/pagemap.h>
12 #include <linux/file.h>
13 #include <linux/sched.h>
14 #include <linux/namei.h>
15 #include <linux/slab.h>
16 
17 #if BITS_PER_LONG >= 64
18 static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
19 {
20 	entry->d_time = time;
21 }
22 
23 static inline u64 fuse_dentry_time(struct dentry *entry)
24 {
25 	return entry->d_time;
26 }
27 #else
28 /*
29  * On 32 bit archs store the high 32 bits of time in d_fsdata
30  */
31 static void fuse_dentry_settime(struct dentry *entry, u64 time)
32 {
33 	entry->d_time = time;
34 	entry->d_fsdata = (void *) (unsigned long) (time >> 32);
35 }
36 
37 static u64 fuse_dentry_time(struct dentry *entry)
38 {
39 	return (u64) entry->d_time +
40 		((u64) (unsigned long) entry->d_fsdata << 32);
41 }
42 #endif
43 
44 /*
45  * FUSE caches dentries and attributes with separate timeouts.  The
46  * time in jiffies until the dentry/attributes are valid is stored in
47  * dentry->d_time and fuse_inode->i_time respectively.
48  */
49 
50 /*
51  * Calculate the time in jiffies until a dentry/attributes are valid
52  */
53 static u64 time_to_jiffies(unsigned long sec, unsigned long nsec)
54 {
55 	if (sec || nsec) {
56 		struct timespec ts = {sec, nsec};
57 		return get_jiffies_64() + timespec_to_jiffies(&ts);
58 	} else
59 		return 0;
60 }
61 
62 /*
63  * Set dentry and possibly attribute timeouts from the lookup/mk*
64  * replies
65  */
66 static void fuse_change_entry_timeout(struct dentry *entry,
67 				      struct fuse_entry_out *o)
68 {
69 	fuse_dentry_settime(entry,
70 		time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
71 }
72 
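/* Attribute validity period, in jiffies, from a GETATTR/SETATTR style reply */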
73 static u64 attr_timeout(struct fuse_attr_out *o)
74 {
75 	return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
76 }
77 
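/* Attribute validity period, in jiffies, from a lookup/mk* style reply */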
78 static u64 entry_attr_timeout(struct fuse_entry_out *o)
79 {
80 	return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
81 }
82 
83 /*
84  * Mark the attributes as stale, so that the next call to
85  * ->getattr() will fetch them from userspace
86  */
87 void fuse_invalidate_attr(struct inode *inode)
88 {
89 	get_fuse_inode(inode)->i_time = 0;
90 }
91 
92 /*
93  * Just mark the entry as stale, so that the next attempt to look it up
94  * will result in a new lookup call to userspace
95  *
96  * This is called when a dentry is about to become negative and the
97  * timeout is unknown (unlink, rmdir, rename and in some cases
98  * lookup)
99  */
100 void fuse_invalidate_entry_cache(struct dentry *entry)
101 {
102 	fuse_dentry_settime(entry, 0);
103 }
104 
105 /*
106  * Same as fuse_invalidate_entry_cache(), but also try to remove the
107  * dentry from the hash
108  */
109 static void fuse_invalidate_entry(struct dentry *entry)
110 {
111 	d_invalidate(entry);
112 	fuse_invalidate_entry_cache(entry);
113 }
114 
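/*
 * Fill in a FUSE_LOOKUP request for 'name' under the directory with
 * the given node ID, directing the reply into 'outarg'
 */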
115 static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,
116 			     u64 nodeid, struct qstr *name,
117 			     struct fuse_entry_out *outarg)
118 {
119 	memset(outarg, 0, sizeof(struct fuse_entry_out));
120 	req->in.h.opcode = FUSE_LOOKUP;
121 	req->in.h.nodeid = nodeid;
122 	req->in.numargs = 1;
123 	req->in.args[0].size = name->len + 1;
124 	req->in.args[0].value = name->name;
125 	req->out.numargs = 1;
126 	if (fc->minor < 9)
127 		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
128 	else
129 		req->out.args[0].size = sizeof(struct fuse_entry_out);
130 	req->out.args[0].value = outarg;
131 }
132 
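/*
 * Snapshot the connection-wide attribute version counter.  The value is
 * later passed to fuse_change_attributes() so that a reply racing with a
 * newer attribute update can be detected.
 */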
133 u64 fuse_get_attr_version(struct fuse_conn *fc)
134 {
135 	u64 curr_version;
136 
137 	/*
138 	 * The spin lock isn't actually needed on 64bit archs, but we
139 	 * don't yet care too much about such optimizations.
140 	 */
141 	spin_lock(&fc->lock);
142 	curr_version = fc->attr_version;
143 	spin_unlock(&fc->lock);
144 
145 	return curr_version;
146 }
147 
148 /*
149  * Check whether the dentry is still valid
150  *
151  * If the entry validity timeout has expired and the dentry is
152  * positive, try to redo the lookup.  If the lookup results in a
153  * different inode, then let the VFS invalidate the dentry and redo
154  * the lookup once more.  If the lookup results in the same inode,
155  * then refresh the attributes, timeouts and mark the dentry valid.
156  */
157 static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
158 {
159 	struct inode *inode;
160 
161 	inode = ACCESS_ONCE(entry->d_inode);
162 	if (inode && is_bad_inode(inode))
163 		return 0;
164 	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
165 		int err;
166 		struct fuse_entry_out outarg;
167 		struct fuse_conn *fc;
168 		struct fuse_req *req;
169 		struct fuse_forget_link *forget;
170 		struct dentry *parent;
171 		u64 attr_version;
172 
173 		/* For negative dentries, always do a fresh lookup */
174 		if (!inode)
175 			return 0;
176 
177 		if (flags & LOOKUP_RCU)
178 			return -ECHILD;
179 
180 		fc = get_fuse_conn(inode);
181 		req = fuse_get_req(fc);
182 		if (IS_ERR(req))
183 			return 0;
184 
185 		forget = fuse_alloc_forget();
186 		if (!forget) {
187 			fuse_put_request(fc, req);
188 			return 0;
189 		}
190 
191 		attr_version = fuse_get_attr_version(fc);
192 
193 		parent = dget_parent(entry);
194 		fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
195 				 &entry->d_name, &outarg);
196 		fuse_request_send(fc, req);
197 		dput(parent);
198 		err = req->out.h.error;
199 		fuse_put_request(fc, req);
200 		/* Zero nodeid is the same as -ENOENT */
201 		if (!err && !outarg.nodeid)
202 			err = -ENOENT;
203 		if (!err) {
204 			struct fuse_inode *fi = get_fuse_inode(inode);
205 			if (outarg.nodeid != get_node_id(inode)) {
206 				fuse_queue_forget(fc, forget, outarg.nodeid, 1);
207 				return 0;
208 			}
209 			spin_lock(&fc->lock);
210 			fi->nlookup++;
211 			spin_unlock(&fc->lock);
212 		}
213 		kfree(forget);
214 		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
215 			return 0;
216 
217 		fuse_change_attributes(inode, &outarg.attr,
218 				       entry_attr_timeout(&outarg),
219 				       attr_version);
220 		fuse_change_entry_timeout(entry, &outarg);
221 	}
222 	return 1;
223 }
224 
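/* A freshly created entry must never report a zero node ID or the reserved root ID */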
225 static int invalid_nodeid(u64 nodeid)
226 {
227 	return !nodeid || nodeid == FUSE_ROOT_ID;
228 }
229 
230 const struct dentry_operations fuse_dentry_operations = {
231 	.d_revalidate	= fuse_dentry_revalidate,
232 };
233 
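/* Only these file types may be reported by the server for an entry */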
234 int fuse_valid_type(int m)
235 {
236 	return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
237 		S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
238 }
239 
240 /*
241  * Add a directory inode to a dentry, ensuring that no other dentry
242  * refers to this inode.  Called with fc->inst_mutex held.
243  */
244 static struct dentry *fuse_d_add_directory(struct dentry *entry,
245 					   struct inode *inode)
246 {
247 	struct dentry *alias = d_find_alias(inode);
248 	if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
249 		/* This tries to shrink the subtree below alias */
250 		fuse_invalidate_entry(alias);
251 		dput(alias);
252 		if (!hlist_empty(&inode->i_dentry))
253 			return ERR_PTR(-EBUSY);
254 	} else {
255 		dput(alias);
256 	}
257 	return d_splice_alias(inode, entry);
258 }
259 
260 int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
261 		     struct fuse_entry_out *outarg, struct inode **inode)
262 {
263 	struct fuse_conn *fc = get_fuse_conn_super(sb);
264 	struct fuse_req *req;
265 	struct fuse_forget_link *forget;
266 	u64 attr_version;
267 	int err;
268 
269 	*inode = NULL;
270 	err = -ENAMETOOLONG;
271 	if (name->len > FUSE_NAME_MAX)
272 		goto out;
273 
274 	req = fuse_get_req(fc);
275 	err = PTR_ERR(req);
276 	if (IS_ERR(req))
277 		goto out;
278 
279 	forget = fuse_alloc_forget();
280 	err = -ENOMEM;
281 	if (!forget) {
282 		fuse_put_request(fc, req);
283 		goto out;
284 	}
285 
286 	attr_version = fuse_get_attr_version(fc);
287 
288 	fuse_lookup_init(fc, req, nodeid, name, outarg);
289 	fuse_request_send(fc, req);
290 	err = req->out.h.error;
291 	fuse_put_request(fc, req);
292 	/* Zero nodeid is the same as -ENOENT, but with a valid timeout */
293 	if (err || !outarg->nodeid)
294 		goto out_put_forget;
295 
296 	err = -EIO;
297 	if (!outarg->nodeid)
298 		goto out_put_forget;
299 	if (!fuse_valid_type(outarg->attr.mode))
300 		goto out_put_forget;
301 
302 	*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
303 			   &outarg->attr, entry_attr_timeout(outarg),
304 			   attr_version);
305 	err = -ENOMEM;
306 	if (!*inode) {
307 		fuse_queue_forget(fc, forget, outarg->nodeid, 1);
308 		goto out;
309 	}
310 	err = 0;
311 
312  out_put_forget:
313 	kfree(forget);
314  out:
315 	return err;
316 }
317 
318 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
319 				  unsigned int flags)
320 {
321 	int err;
322 	struct fuse_entry_out outarg;
323 	struct inode *inode;
324 	struct dentry *newent;
325 	struct fuse_conn *fc = get_fuse_conn(dir);
326 	bool outarg_valid = true;
327 
328 	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
329 			       &outarg, &inode);
330 	if (err == -ENOENT) {
331 		outarg_valid = false;
332 		err = 0;
333 	}
334 	if (err)
335 		goto out_err;
336 
337 	err = -EIO;
338 	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
339 		goto out_iput;
340 
341 	if (inode && S_ISDIR(inode->i_mode)) {
342 		mutex_lock(&fc->inst_mutex);
343 		newent = fuse_d_add_directory(entry, inode);
344 		mutex_unlock(&fc->inst_mutex);
345 		err = PTR_ERR(newent);
346 		if (IS_ERR(newent))
347 			goto out_iput;
348 	} else {
349 		newent = d_splice_alias(inode, entry);
350 	}
351 
352 	entry = newent ? newent : entry;
353 	if (outarg_valid)
354 		fuse_change_entry_timeout(entry, &outarg);
355 	else
356 		fuse_invalidate_entry_cache(entry);
357 
358 	return newent;
359 
360  out_iput:
361 	iput(inode);
362  out_err:
363 	return ERR_PTR(err);
364 }
365 
366 /*
367  * Atomic create+open operation
368  *
369  * If the filesystem doesn't support this, then fall back to separate
370  * 'mknod' + 'open' requests.
371  */
372 static int fuse_create_open(struct inode *dir, struct dentry *entry,
373 			    struct file *file, unsigned flags,
374 			    umode_t mode, int *opened)
375 {
376 	int err;
377 	struct inode *inode;
378 	struct fuse_conn *fc = get_fuse_conn(dir);
379 	struct fuse_req *req;
380 	struct fuse_forget_link *forget;
381 	struct fuse_create_in inarg;
382 	struct fuse_open_out outopen;
383 	struct fuse_entry_out outentry;
384 	struct fuse_file *ff;
385 
386 	/* Userspace expects S_IFREG in create mode */
387 	BUG_ON((mode & S_IFMT) != S_IFREG);
388 
389 	forget = fuse_alloc_forget();
390 	err = -ENOMEM;
391 	if (!forget)
392 		goto out_err;
393 
394 	req = fuse_get_req(fc);
395 	err = PTR_ERR(req);
396 	if (IS_ERR(req))
397 		goto out_put_forget_req;
398 
399 	err = -ENOMEM;
400 	ff = fuse_file_alloc(fc);
401 	if (!ff)
402 		goto out_put_request;
403 
404 	if (!fc->dont_mask)
405 		mode &= ~current_umask();
406 
407 	flags &= ~O_NOCTTY;
408 	memset(&inarg, 0, sizeof(inarg));
409 	memset(&outentry, 0, sizeof(outentry));
410 	inarg.flags = flags;
411 	inarg.mode = mode;
412 	inarg.umask = current_umask();
413 	req->in.h.opcode = FUSE_CREATE;
414 	req->in.h.nodeid = get_node_id(dir);
415 	req->in.numargs = 2;
416 	req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
417 						sizeof(inarg);
418 	req->in.args[0].value = &inarg;
419 	req->in.args[1].size = entry->d_name.len + 1;
420 	req->in.args[1].value = entry->d_name.name;
421 	req->out.numargs = 2;
422 	if (fc->minor < 9)
423 		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
424 	else
425 		req->out.args[0].size = sizeof(outentry);
426 	req->out.args[0].value = &outentry;
427 	req->out.args[1].size = sizeof(outopen);
428 	req->out.args[1].value = &outopen;
429 	fuse_request_send(fc, req);
430 	err = req->out.h.error;
431 	if (err)
432 		goto out_free_ff;
433 
434 	err = -EIO;
435 	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
436 		goto out_free_ff;
437 
438 	fuse_put_request(fc, req);
439 	ff->fh = outopen.fh;
440 	ff->nodeid = outentry.nodeid;
441 	ff->open_flags = outopen.open_flags;
442 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
443 			  &outentry.attr, entry_attr_timeout(&outentry), 0);
444 	if (!inode) {
445 		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
446 		fuse_sync_release(ff, flags);
447 		fuse_queue_forget(fc, forget, outentry.nodeid, 1);
448 		err = -ENOMEM;
449 		goto out_err;
450 	}
451 	kfree(forget);
452 	d_instantiate(entry, inode);
453 	fuse_change_entry_timeout(entry, &outentry);
454 	fuse_invalidate_attr(dir);
455 	err = finish_open(file, entry, generic_file_open, opened);
456 	if (err) {
457 		fuse_sync_release(ff, flags);
458 	} else {
459 		file->private_data = fuse_file_get(ff);
460 		fuse_finish_open(inode, file);
461 	}
462 	return err;
463 
464 out_free_ff:
465 	fuse_file_free(ff);
466 out_put_request:
467 	fuse_put_request(fc, req);
468 out_put_forget_req:
469 	kfree(forget);
470 out_err:
471 	return err;
472 }
473 
474 static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
475 static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
476 			    struct file *file, unsigned flags,
477 			    umode_t mode, int *opened)
478 {
479 	int err;
480 	struct fuse_conn *fc = get_fuse_conn(dir);
481 	struct dentry *res = NULL;
482 
483 	if (d_unhashed(entry)) {
484 		res = fuse_lookup(dir, entry, 0);
485 		if (IS_ERR(res))
486 			return PTR_ERR(res);
487 
488 		if (res)
489 			entry = res;
490 	}
491 
492 	if (!(flags & O_CREAT) || entry->d_inode)
493 		goto no_open;
494 
495 	/* Only creates */
496 	*opened |= FILE_CREATED;
497 
498 	if (fc->no_create)
499 		goto mknod;
500 
501 	err = fuse_create_open(dir, entry, file, flags, mode, opened);
502 	if (err == -ENOSYS) {
503 		fc->no_create = 1;
504 		goto mknod;
505 	}
506 out_dput:
507 	dput(res);
508 	return err;
509 
510 mknod:
511 	err = fuse_mknod(dir, entry, mode, 0);
512 	if (err)
513 		goto out_dput;
514 no_open:
515 	return finish_no_open(file, res);
516 }
517 
518 /*
519  * Code shared between mknod, mkdir, symlink and link
520  */
521 static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
522 			    struct inode *dir, struct dentry *entry,
523 			    umode_t mode)
524 {
525 	struct fuse_entry_out outarg;
526 	struct inode *inode;
527 	int err;
528 	struct fuse_forget_link *forget;
529 
530 	forget = fuse_alloc_forget();
531 	if (!forget) {
532 		fuse_put_request(fc, req);
533 		return -ENOMEM;
534 	}
535 
536 	memset(&outarg, 0, sizeof(outarg));
537 	req->in.h.nodeid = get_node_id(dir);
538 	req->out.numargs = 1;
539 	if (fc->minor < 9)
540 		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
541 	else
542 		req->out.args[0].size = sizeof(outarg);
543 	req->out.args[0].value = &outarg;
544 	fuse_request_send(fc, req);
545 	err = req->out.h.error;
546 	fuse_put_request(fc, req);
547 	if (err)
548 		goto out_put_forget_req;
549 
550 	err = -EIO;
551 	if (invalid_nodeid(outarg.nodeid))
552 		goto out_put_forget_req;
553 
554 	if ((outarg.attr.mode ^ mode) & S_IFMT)
555 		goto out_put_forget_req;
556 
557 	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
558 			  &outarg.attr, entry_attr_timeout(&outarg), 0);
559 	if (!inode) {
560 		fuse_queue_forget(fc, forget, outarg.nodeid, 1);
561 		return -ENOMEM;
562 	}
563 	kfree(forget);
564 
565 	if (S_ISDIR(inode->i_mode)) {
566 		struct dentry *alias;
567 		mutex_lock(&fc->inst_mutex);
568 		alias = d_find_alias(inode);
569 		if (alias) {
570 			/* New directory must have moved since mkdir */
571 			mutex_unlock(&fc->inst_mutex);
572 			dput(alias);
573 			iput(inode);
574 			return -EBUSY;
575 		}
576 		d_instantiate(entry, inode);
577 		mutex_unlock(&fc->inst_mutex);
578 	} else
579 		d_instantiate(entry, inode);
580 
581 	fuse_change_entry_timeout(entry, &outarg);
582 	fuse_invalidate_attr(dir);
583 	return 0;
584 
585  out_put_forget_req:
586 	kfree(forget);
587 	return err;
588 }
589 
590 static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
591 		      dev_t rdev)
592 {
593 	struct fuse_mknod_in inarg;
594 	struct fuse_conn *fc = get_fuse_conn(dir);
595 	struct fuse_req *req = fuse_get_req(fc);
596 	if (IS_ERR(req))
597 		return PTR_ERR(req);
598 
599 	if (!fc->dont_mask)
600 		mode &= ~current_umask();
601 
602 	memset(&inarg, 0, sizeof(inarg));
603 	inarg.mode = mode;
604 	inarg.rdev = new_encode_dev(rdev);
605 	inarg.umask = current_umask();
606 	req->in.h.opcode = FUSE_MKNOD;
607 	req->in.numargs = 2;
608 	req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
609 						sizeof(inarg);
610 	req->in.args[0].value = &inarg;
611 	req->in.args[1].size = entry->d_name.len + 1;
612 	req->in.args[1].value = entry->d_name.name;
613 	return create_new_entry(fc, req, dir, entry, mode);
614 }
615 
616 static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
617 		       bool excl)
618 {
619 	return fuse_mknod(dir, entry, mode, 0);
620 }
621 
622 static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
623 {
624 	struct fuse_mkdir_in inarg;
625 	struct fuse_conn *fc = get_fuse_conn(dir);
626 	struct fuse_req *req = fuse_get_req(fc);
627 	if (IS_ERR(req))
628 		return PTR_ERR(req);
629 
630 	if (!fc->dont_mask)
631 		mode &= ~current_umask();
632 
633 	memset(&inarg, 0, sizeof(inarg));
634 	inarg.mode = mode;
635 	inarg.umask = current_umask();
636 	req->in.h.opcode = FUSE_MKDIR;
637 	req->in.numargs = 2;
638 	req->in.args[0].size = sizeof(inarg);
639 	req->in.args[0].value = &inarg;
640 	req->in.args[1].size = entry->d_name.len + 1;
641 	req->in.args[1].value = entry->d_name.name;
642 	return create_new_entry(fc, req, dir, entry, S_IFDIR);
643 }
644 
645 static int fuse_symlink(struct inode *dir, struct dentry *entry,
646 			const char *link)
647 {
648 	struct fuse_conn *fc = get_fuse_conn(dir);
649 	unsigned len = strlen(link) + 1;
650 	struct fuse_req *req = fuse_get_req(fc);
651 	if (IS_ERR(req))
652 		return PTR_ERR(req);
653 
654 	req->in.h.opcode = FUSE_SYMLINK;
655 	req->in.numargs = 2;
656 	req->in.args[0].size = entry->d_name.len + 1;
657 	req->in.args[0].value = entry->d_name.name;
658 	req->in.args[1].size = len;
659 	req->in.args[1].value = link;
660 	return create_new_entry(fc, req, dir, entry, S_IFLNK);
661 }
662 
663 static int fuse_unlink(struct inode *dir, struct dentry *entry)
664 {
665 	int err;
666 	struct fuse_conn *fc = get_fuse_conn(dir);
667 	struct fuse_req *req = fuse_get_req(fc);
668 	if (IS_ERR(req))
669 		return PTR_ERR(req);
670 
671 	req->in.h.opcode = FUSE_UNLINK;
672 	req->in.h.nodeid = get_node_id(dir);
673 	req->in.numargs = 1;
674 	req->in.args[0].size = entry->d_name.len + 1;
675 	req->in.args[0].value = entry->d_name.name;
676 	fuse_request_send(fc, req);
677 	err = req->out.h.error;
678 	fuse_put_request(fc, req);
679 	if (!err) {
680 		struct inode *inode = entry->d_inode;
681 		struct fuse_inode *fi = get_fuse_inode(inode);
682 
683 		spin_lock(&fc->lock);
684 		fi->attr_version = ++fc->attr_version;
685 		drop_nlink(inode);
686 		spin_unlock(&fc->lock);
687 		fuse_invalidate_attr(inode);
688 		fuse_invalidate_attr(dir);
689 		fuse_invalidate_entry_cache(entry);
690 	} else if (err == -EINTR)
691 		fuse_invalidate_entry(entry);
692 	return err;
693 }
694 
695 static int fuse_rmdir(struct inode *dir, struct dentry *entry)
696 {
697 	int err;
698 	struct fuse_conn *fc = get_fuse_conn(dir);
699 	struct fuse_req *req = fuse_get_req(fc);
700 	if (IS_ERR(req))
701 		return PTR_ERR(req);
702 
703 	req->in.h.opcode = FUSE_RMDIR;
704 	req->in.h.nodeid = get_node_id(dir);
705 	req->in.numargs = 1;
706 	req->in.args[0].size = entry->d_name.len + 1;
707 	req->in.args[0].value = entry->d_name.name;
708 	fuse_request_send(fc, req);
709 	err = req->out.h.error;
710 	fuse_put_request(fc, req);
711 	if (!err) {
712 		clear_nlink(entry->d_inode);
713 		fuse_invalidate_attr(dir);
714 		fuse_invalidate_entry_cache(entry);
715 	} else if (err == -EINTR)
716 		fuse_invalidate_entry(entry);
717 	return err;
718 }
719 
720 static int fuse_rename(struct inode *olddir, struct dentry *oldent,
721 		       struct inode *newdir, struct dentry *newent)
722 {
723 	int err;
724 	struct fuse_rename_in inarg;
725 	struct fuse_conn *fc = get_fuse_conn(olddir);
726 	struct fuse_req *req = fuse_get_req(fc);
727 
728 	if (IS_ERR(req))
729 		return PTR_ERR(req);
730 
731 	memset(&inarg, 0, sizeof(inarg));
732 	inarg.newdir = get_node_id(newdir);
733 	req->in.h.opcode = FUSE_RENAME;
734 	req->in.h.nodeid = get_node_id(olddir);
735 	req->in.numargs = 3;
736 	req->in.args[0].size = sizeof(inarg);
737 	req->in.args[0].value = &inarg;
738 	req->in.args[1].size = oldent->d_name.len + 1;
739 	req->in.args[1].value = oldent->d_name.name;
740 	req->in.args[2].size = newent->d_name.len + 1;
741 	req->in.args[2].value = newent->d_name.name;
742 	fuse_request_send(fc, req);
743 	err = req->out.h.error;
744 	fuse_put_request(fc, req);
745 	if (!err) {
746 		/* ctime changes */
747 		fuse_invalidate_attr(oldent->d_inode);
748 
749 		fuse_invalidate_attr(olddir);
750 		if (olddir != newdir)
751 			fuse_invalidate_attr(newdir);
752 
753 		/* newent will end up negative */
754 		if (newent->d_inode) {
755 			fuse_invalidate_attr(newent->d_inode);
756 			fuse_invalidate_entry_cache(newent);
757 		}
758 	} else if (err == -EINTR) {
759 		/* If request was interrupted, DEITY only knows if the
760 		   rename actually took place.  If the invalidation
761 		   fails (e.g. some process has CWD under the renamed
762 		   directory), then there can be inconsistency between
763 		   the dcache and the real filesystem.  Tough luck. */
764 		fuse_invalidate_entry(oldent);
765 		if (newent->d_inode)
766 			fuse_invalidate_entry(newent);
767 	}
768 
769 	return err;
770 }
771 
772 static int fuse_link(struct dentry *entry, struct inode *newdir,
773 		     struct dentry *newent)
774 {
775 	int err;
776 	struct fuse_link_in inarg;
777 	struct inode *inode = entry->d_inode;
778 	struct fuse_conn *fc = get_fuse_conn(inode);
779 	struct fuse_req *req = fuse_get_req(fc);
780 	if (IS_ERR(req))
781 		return PTR_ERR(req);
782 
783 	memset(&inarg, 0, sizeof(inarg));
784 	inarg.oldnodeid = get_node_id(inode);
785 	req->in.h.opcode = FUSE_LINK;
786 	req->in.numargs = 2;
787 	req->in.args[0].size = sizeof(inarg);
788 	req->in.args[0].value = &inarg;
789 	req->in.args[1].size = newent->d_name.len + 1;
790 	req->in.args[1].value = newent->d_name.name;
791 	err = create_new_entry(fc, req, newdir, newent, inode->i_mode);
792 	/* Contrary to "normal" filesystems, it can happen that a link
793 	   makes two "logical" inodes point to the same "physical"
794 	   inode.  We invalidate the attributes of the old one, so it
795 	   will reflect changes in the backing inode (link count,
796 	   etc.)
797 	*/
798 	if (!err) {
799 		struct fuse_inode *fi = get_fuse_inode(inode);
800 
801 		spin_lock(&fc->lock);
802 		fi->attr_version = ++fc->attr_version;
803 		inc_nlink(inode);
804 		spin_unlock(&fc->lock);
805 		fuse_invalidate_attr(inode);
806 	} else if (err == -EINTR) {
807 		fuse_invalidate_attr(inode);
808 	}
809 	return err;
810 }
811 
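/* Translate a fuse_attr reply into the kstat structure used by the VFS */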
812 static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
813 			  struct kstat *stat)
814 {
815 	unsigned int blkbits;
816 
817 	stat->dev = inode->i_sb->s_dev;
818 	stat->ino = attr->ino;
819 	stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
820 	stat->nlink = attr->nlink;
821 	stat->uid = make_kuid(&init_user_ns, attr->uid);
822 	stat->gid = make_kgid(&init_user_ns, attr->gid);
823 	stat->rdev = inode->i_rdev;
824 	stat->atime.tv_sec = attr->atime;
825 	stat->atime.tv_nsec = attr->atimensec;
826 	stat->mtime.tv_sec = attr->mtime;
827 	stat->mtime.tv_nsec = attr->mtimensec;
828 	stat->ctime.tv_sec = attr->ctime;
829 	stat->ctime.tv_nsec = attr->ctimensec;
830 	stat->size = attr->size;
831 	stat->blocks = attr->blocks;
832 
833 	if (attr->blksize != 0)
834 		blkbits = ilog2(attr->blksize);
835 	else
836 		blkbits = inode->i_sb->s_blocksize_bits;
837 
838 	stat->blksize = 1 << blkbits;
839 }
840 
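/*
 * Send a GETATTR request and refresh the cached inode attributes from
 * the reply.  If 'stat' is non-NULL it is filled in as well.
 */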
841 static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
842 			   struct file *file)
843 {
844 	int err;
845 	struct fuse_getattr_in inarg;
846 	struct fuse_attr_out outarg;
847 	struct fuse_conn *fc = get_fuse_conn(inode);
848 	struct fuse_req *req;
849 	u64 attr_version;
850 
851 	req = fuse_get_req(fc);
852 	if (IS_ERR(req))
853 		return PTR_ERR(req);
854 
855 	attr_version = fuse_get_attr_version(fc);
856 
857 	memset(&inarg, 0, sizeof(inarg));
858 	memset(&outarg, 0, sizeof(outarg));
859 	/* Directories have separate file-handle space */
860 	if (file && S_ISREG(inode->i_mode)) {
861 		struct fuse_file *ff = file->private_data;
862 
863 		inarg.getattr_flags |= FUSE_GETATTR_FH;
864 		inarg.fh = ff->fh;
865 	}
866 	req->in.h.opcode = FUSE_GETATTR;
867 	req->in.h.nodeid = get_node_id(inode);
868 	req->in.numargs = 1;
869 	req->in.args[0].size = sizeof(inarg);
870 	req->in.args[0].value = &inarg;
871 	req->out.numargs = 1;
872 	if (fc->minor < 9)
873 		req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
874 	else
875 		req->out.args[0].size = sizeof(outarg);
876 	req->out.args[0].value = &outarg;
877 	fuse_request_send(fc, req);
878 	err = req->out.h.error;
879 	fuse_put_request(fc, req);
880 	if (!err) {
881 		if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
882 			make_bad_inode(inode);
883 			err = -EIO;
884 		} else {
885 			fuse_change_attributes(inode, &outarg.attr,
886 					       attr_timeout(&outarg),
887 					       attr_version);
888 			if (stat)
889 				fuse_fillattr(inode, &outarg.attr, stat);
890 		}
891 	}
892 	return err;
893 }
894 
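/*
 * Use the cached attributes if they are still valid, otherwise fetch
 * fresh ones with a GETATTR request.  '*refreshed' reports whether a
 * round trip to userspace was made.
 */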
895 int fuse_update_attributes(struct inode *inode, struct kstat *stat,
896 			   struct file *file, bool *refreshed)
897 {
898 	struct fuse_inode *fi = get_fuse_inode(inode);
899 	int err;
900 	bool r;
901 
902 	if (fi->i_time < get_jiffies_64()) {
903 		r = true;
904 		err = fuse_do_getattr(inode, stat, file);
905 	} else {
906 		r = false;
907 		err = 0;
908 		if (stat) {
909 			generic_fillattr(inode, stat);
910 			stat->mode = fi->orig_i_mode;
911 			stat->ino = fi->orig_ino;
912 		}
913 	}
914 
915 	if (refreshed != NULL)
916 		*refreshed = r;
917 
918 	return err;
919 }
920 
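/*
 * Entry invalidation requested by the filesystem: look up the dentry
 * for 'name' under 'parent_nodeid' and invalidate it.  If 'child_nodeid'
 * is given and matches, also unhash the dentry as if it had been
 * unlinked (or removed, in the directory case).
 */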
921 int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
922 			     u64 child_nodeid, struct qstr *name)
923 {
924 	int err = -ENOTDIR;
925 	struct inode *parent;
926 	struct dentry *dir;
927 	struct dentry *entry;
928 
929 	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
930 	if (!parent)
931 		return -ENOENT;
932 
933 	mutex_lock(&parent->i_mutex);
934 	if (!S_ISDIR(parent->i_mode))
935 		goto unlock;
936 
937 	err = -ENOENT;
938 	dir = d_find_alias(parent);
939 	if (!dir)
940 		goto unlock;
941 
942 	entry = d_lookup(dir, name);
943 	dput(dir);
944 	if (!entry)
945 		goto unlock;
946 
947 	fuse_invalidate_attr(parent);
948 	fuse_invalidate_entry(entry);
949 
950 	if (child_nodeid != 0 && entry->d_inode) {
951 		mutex_lock(&entry->d_inode->i_mutex);
952 		if (get_node_id(entry->d_inode) != child_nodeid) {
953 			err = -ENOENT;
954 			goto badentry;
955 		}
956 		if (d_mountpoint(entry)) {
957 			err = -EBUSY;
958 			goto badentry;
959 		}
960 		if (S_ISDIR(entry->d_inode->i_mode)) {
961 			shrink_dcache_parent(entry);
962 			if (!simple_empty(entry)) {
963 				err = -ENOTEMPTY;
964 				goto badentry;
965 			}
966 			entry->d_inode->i_flags |= S_DEAD;
967 		}
968 		dont_mount(entry);
969 		clear_nlink(entry->d_inode);
970 		err = 0;
971  badentry:
972 		mutex_unlock(&entry->d_inode->i_mutex);
973 		if (!err)
974 			d_delete(entry);
975 	} else {
976 		err = 0;
977 	}
978 	dput(entry);
979 
980  unlock:
981 	mutex_unlock(&parent->i_mutex);
982 	iput(parent);
983 	return err;
984 }
985 
986 /*
987  * Calling into a user-controlled filesystem gives the filesystem
988  * daemon ptrace-like capabilities over the requester process.  This
989  * means that the filesystem daemon is able to record the exact
990  * filesystem operations performed, and can also control the behavior
991  * of the requester process in otherwise impossible ways.  For example,
992  * it can delay an operation for an arbitrary length of time, allowing
993  * a denial of service against the requester.
994  *
995  * For this reason, only processes for which the owner of the mount
996  * has ptrace privilege may call into the filesystem.  This excludes
997  * processes started by other users, and suid or sgid processes.
998  */
999 int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task)
1000 {
1001 	const struct cred *cred;
1002 	int ret;
1003 
1004 	if (fc->flags & FUSE_ALLOW_OTHER)
1005 		return 1;
1006 
1007 	rcu_read_lock();
1008 	ret = 0;
1009 	cred = __task_cred(task);
1010 	if (uid_eq(cred->euid, fc->user_id) &&
1011 	    uid_eq(cred->suid, fc->user_id) &&
1012 	    uid_eq(cred->uid,  fc->user_id) &&
1013 	    gid_eq(cred->egid, fc->group_id) &&
1014 	    gid_eq(cred->sgid, fc->group_id) &&
1015 	    gid_eq(cred->gid,  fc->group_id))
1016 		ret = 1;
1017 	rcu_read_unlock();
1018 
1019 	return ret;
1020 }
1021 
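/*
 * Send an ACCESS request, used by the "remote" permission model for
 * access(2) and chdir(2).  An -ENOSYS reply disables further ACCESS
 * requests on this connection.
 */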
1022 static int fuse_access(struct inode *inode, int mask)
1023 {
1024 	struct fuse_conn *fc = get_fuse_conn(inode);
1025 	struct fuse_req *req;
1026 	struct fuse_access_in inarg;
1027 	int err;
1028 
1029 	if (fc->no_access)
1030 		return 0;
1031 
1032 	req = fuse_get_req(fc);
1033 	if (IS_ERR(req))
1034 		return PTR_ERR(req);
1035 
1036 	memset(&inarg, 0, sizeof(inarg));
1037 	inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
1038 	req->in.h.opcode = FUSE_ACCESS;
1039 	req->in.h.nodeid = get_node_id(inode);
1040 	req->in.numargs = 1;
1041 	req->in.args[0].size = sizeof(inarg);
1042 	req->in.args[0].value = &inarg;
1043 	fuse_request_send(fc, req);
1044 	err = req->out.h.error;
1045 	fuse_put_request(fc, req);
1046 	if (err == -ENOSYS) {
1047 		fc->no_access = 1;
1048 		err = 0;
1049 	}
1050 	return err;
1051 }
1052 
1053 static int fuse_perm_getattr(struct inode *inode, int mask)
1054 {
1055 	if (mask & MAY_NOT_BLOCK)
1056 		return -ECHILD;
1057 
1058 	return fuse_do_getattr(inode, NULL, NULL);
1059 }
1060 
1061 /*
1062  * Check permission.  The two basic access models of FUSE are:
1063  *
1064  * 1) Local access checking ('default_permissions' mount option) based
1065  * on file mode.  This is the plain old disk filesystem permission
1066  * model.
1067  *
1068  * 2) "Remote" access checking, where the server is responsible for
1069  * checking permission in each inode operation.  An exception to this
1070  * is if ->permission() was invoked from sys_access(), in which case
1071  * an access request is sent.  Execute permission is still checked
1072  * locally based on file mode.
1073  */
1074 static int fuse_permission(struct inode *inode, int mask)
1075 {
1076 	struct fuse_conn *fc = get_fuse_conn(inode);
1077 	bool refreshed = false;
1078 	int err = 0;
1079 
1080 	if (!fuse_allow_task(fc, current))
1081 		return -EACCES;
1082 
1083 	/*
1084 	 * If attributes are needed, refresh them before proceeding
1085 	 */
1086 	if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
1087 	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1088 		struct fuse_inode *fi = get_fuse_inode(inode);
1089 
1090 		if (fi->i_time < get_jiffies_64()) {
1091 			refreshed = true;
1092 
1093 			err = fuse_perm_getattr(inode, mask);
1094 			if (err)
1095 				return err;
1096 		}
1097 	}
1098 
1099 	if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
1100 		err = generic_permission(inode, mask);
1101 
1102 		/* If permission is denied, try to refresh file
1103 		   attributes.  This is also needed because the root
1104 		   node will at first have no permissions */
1105 		if (err == -EACCES && !refreshed) {
1106 			err = fuse_perm_getattr(inode, mask);
1107 			if (!err)
1108 				err = generic_permission(inode, mask);
1109 		}
1110 
1111 		/* Note: the opposite of the above test does not
1112 		   exist.  So if permissions are revoked this won't be
1113 		   noticed immediately, only after the attribute
1114 		   timeout has expired */
1115 	} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1116 		if (mask & MAY_NOT_BLOCK)
1117 			return -ECHILD;
1118 
1119 		err = fuse_access(inode, mask);
1120 	} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1121 		if (!(inode->i_mode & S_IXUGO)) {
1122 			if (refreshed)
1123 				return -EACCES;
1124 
1125 			err = fuse_perm_getattr(inode, mask);
1126 			if (!err && !(inode->i_mode & S_IXUGO))
1127 				return -EACCES;
1128 		}
1129 	}
1130 	return err;
1131 }
1132 
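/*
 * Feed the directory entries from a READDIR reply buffer to the VFS
 * filldir callback, advancing file->f_pos as entries are consumed
 */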
1133 static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
1134 			 void *dstbuf, filldir_t filldir)
1135 {
1136 	while (nbytes >= FUSE_NAME_OFFSET) {
1137 		struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
1138 		size_t reclen = FUSE_DIRENT_SIZE(dirent);
1139 		int over;
1140 		if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
1141 			return -EIO;
1142 		if (reclen > nbytes)
1143 			break;
1144 
1145 		over = filldir(dstbuf, dirent->name, dirent->namelen,
1146 			       file->f_pos, dirent->ino, dirent->type);
1147 		if (over)
1148 			break;
1149 
1150 		buf += reclen;
1151 		nbytes -= reclen;
1152 		file->f_pos = dirent->off;
1153 	}
1154 
1155 	return 0;
1156 }
1157 
1158 static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
1159 {
1160 	int err;
1161 	size_t nbytes;
1162 	struct page *page;
1163 	struct inode *inode = file->f_path.dentry->d_inode;
1164 	struct fuse_conn *fc = get_fuse_conn(inode);
1165 	struct fuse_req *req;
1166 
1167 	if (is_bad_inode(inode))
1168 		return -EIO;
1169 
1170 	req = fuse_get_req(fc);
1171 	if (IS_ERR(req))
1172 		return PTR_ERR(req);
1173 
1174 	page = alloc_page(GFP_KERNEL);
1175 	if (!page) {
1176 		fuse_put_request(fc, req);
1177 		return -ENOMEM;
1178 	}
1179 	req->out.argpages = 1;
1180 	req->num_pages = 1;
1181 	req->pages[0] = page;
1182 	fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR);
1183 	fuse_request_send(fc, req);
1184 	nbytes = req->out.args[0].size;
1185 	err = req->out.h.error;
1186 	fuse_put_request(fc, req);
1187 	if (!err)
1188 		err = parse_dirfile(page_address(page), nbytes, file, dstbuf,
1189 				    filldir);
1190 
1191 	__free_page(page);
1192 	fuse_invalidate_attr(inode); /* atime changed */
1193 	return err;
1194 }
1195 
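/*
 * Read the symlink target with a READLINK request into a freshly
 * allocated page; the result is released via free_link()
 */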
1196 static char *read_link(struct dentry *dentry)
1197 {
1198 	struct inode *inode = dentry->d_inode;
1199 	struct fuse_conn *fc = get_fuse_conn(inode);
1200 	struct fuse_req *req = fuse_get_req(fc);
1201 	char *link;
1202 
1203 	if (IS_ERR(req))
1204 		return ERR_CAST(req);
1205 
1206 	link = (char *) __get_free_page(GFP_KERNEL);
1207 	if (!link) {
1208 		link = ERR_PTR(-ENOMEM);
1209 		goto out;
1210 	}
1211 	req->in.h.opcode = FUSE_READLINK;
1212 	req->in.h.nodeid = get_node_id(inode);
1213 	req->out.argvar = 1;
1214 	req->out.numargs = 1;
1215 	req->out.args[0].size = PAGE_SIZE - 1;
1216 	req->out.args[0].value = link;
1217 	fuse_request_send(fc, req);
1218 	if (req->out.h.error) {
1219 		free_page((unsigned long) link);
1220 		link = ERR_PTR(req->out.h.error);
1221 	} else
1222 		link[req->out.args[0].size] = '\0';
1223  out:
1224 	fuse_put_request(fc, req);
1225 	fuse_invalidate_attr(inode); /* atime changed */
1226 	return link;
1227 }
1228 
1229 static void free_link(char *link)
1230 {
1231 	if (!IS_ERR(link))
1232 		free_page((unsigned long) link);
1233 }
1234 
1235 static void *fuse_follow_link(struct dentry *dentry, struct nameidata *nd)
1236 {
1237 	nd_set_link(nd, read_link(dentry));
1238 	return NULL;
1239 }
1240 
1241 static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
1242 {
1243 	free_link(nd_get_link(nd));
1244 }
1245 
1246 static int fuse_dir_open(struct inode *inode, struct file *file)
1247 {
1248 	return fuse_open_common(inode, file, true);
1249 }
1250 
1251 static int fuse_dir_release(struct inode *inode, struct file *file)
1252 {
1253 	fuse_release_common(file, FUSE_RELEASEDIR);
1254 
1255 	return 0;
1256 }
1257 
1258 static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
1259 			  int datasync)
1260 {
1261 	return fuse_fsync_common(file, start, end, datasync, 1);
1262 }
1263 
1264 static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
1265 			    unsigned long arg)
1266 {
1267 	struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
1268 
1269 	/* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
1270 	if (fc->minor < 18)
1271 		return -ENOTTY;
1272 
1273 	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
1274 }
1275 
1276 static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
1277 				   unsigned long arg)
1278 {
1279 	struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
1280 
1281 	if (fc->minor < 18)
1282 		return -ENOTTY;
1283 
1284 	return fuse_ioctl_common(file, cmd, arg,
1285 				 FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
1286 }
1287 
1288 static bool update_mtime(unsigned ivalid)
1289 {
1290 	/* Always update if mtime is explicitly set  */
1291 	if (ivalid & ATTR_MTIME_SET)
1292 		return true;
1293 
1294 	/* If it's an open(O_TRUNC) or an ftruncate(), don't update */
1295 	if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
1296 		return false;
1297 
1298 	/* In all other cases update */
1299 	return true;
1300 }
1301 
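/* Translate the VFS iattr into the fuse_setattr_in request argument */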
1302 static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
1303 {
1304 	unsigned ivalid = iattr->ia_valid;
1305 
1306 	if (ivalid & ATTR_MODE)
1307 		arg->valid |= FATTR_MODE,   arg->mode = iattr->ia_mode;
1308 	if (ivalid & ATTR_UID)
1309 		arg->valid |= FATTR_UID,    arg->uid = from_kuid(&init_user_ns, iattr->ia_uid);
1310 	if (ivalid & ATTR_GID)
1311 		arg->valid |= FATTR_GID,    arg->gid = from_kgid(&init_user_ns, iattr->ia_gid);
1312 	if (ivalid & ATTR_SIZE)
1313 		arg->valid |= FATTR_SIZE,   arg->size = iattr->ia_size;
1314 	if (ivalid & ATTR_ATIME) {
1315 		arg->valid |= FATTR_ATIME;
1316 		arg->atime = iattr->ia_atime.tv_sec;
1317 		arg->atimensec = iattr->ia_atime.tv_nsec;
1318 		if (!(ivalid & ATTR_ATIME_SET))
1319 			arg->valid |= FATTR_ATIME_NOW;
1320 	}
1321 	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid)) {
1322 		arg->valid |= FATTR_MTIME;
1323 		arg->mtime = iattr->ia_mtime.tv_sec;
1324 		arg->mtimensec = iattr->ia_mtime.tv_nsec;
1325 		if (!(ivalid & ATTR_MTIME_SET))
1326 			arg->valid |= FATTR_MTIME_NOW;
1327 	}
1328 }
1329 
1330 /*
1331  * Prevent concurrent writepages on inode
1332  *
1333  * This is done by adding a negative bias to the inode write counter
1334  * and waiting for all pending writes to finish.
1335  */
1336 void fuse_set_nowrite(struct inode *inode)
1337 {
1338 	struct fuse_conn *fc = get_fuse_conn(inode);
1339 	struct fuse_inode *fi = get_fuse_inode(inode);
1340 
1341 	BUG_ON(!mutex_is_locked(&inode->i_mutex));
1342 
1343 	spin_lock(&fc->lock);
1344 	BUG_ON(fi->writectr < 0);
1345 	fi->writectr += FUSE_NOWRITE;
1346 	spin_unlock(&fc->lock);
1347 	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
1348 }
1349 
1350 /*
1351  * Allow writepages on inode
1352  *
1353  * Remove the bias from the writecounter and send any queued
1354  * writepages.
1355  */
1356 static void __fuse_release_nowrite(struct inode *inode)
1357 {
1358 	struct fuse_inode *fi = get_fuse_inode(inode);
1359 
1360 	BUG_ON(fi->writectr != FUSE_NOWRITE);
1361 	fi->writectr = 0;
1362 	fuse_flush_writepages(inode);
1363 }
1364 
1365 void fuse_release_nowrite(struct inode *inode)
1366 {
1367 	struct fuse_conn *fc = get_fuse_conn(inode);
1368 
1369 	spin_lock(&fc->lock);
1370 	__fuse_release_nowrite(inode);
1371 	spin_unlock(&fc->lock);
1372 }
1373 
1374 /*
1375  * Set attributes, and at the same time refresh them.
1376  *
1377  * Truncation is slightly complicated, because the 'truncate' request
1378  * may fail, in which case we don't want to touch the mapping.
1379  * vmtruncate() doesn't allow for this case, so do the rlimit checking
1380  * and the actual truncation by hand.
1381  */
1382 static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
1383 			   struct file *file)
1384 {
1385 	struct inode *inode = entry->d_inode;
1386 	struct fuse_conn *fc = get_fuse_conn(inode);
1387 	struct fuse_req *req;
1388 	struct fuse_setattr_in inarg;
1389 	struct fuse_attr_out outarg;
1390 	bool is_truncate = false;
1391 	loff_t oldsize;
1392 	int err;
1393 
1394 	if (!fuse_allow_task(fc, current))
1395 		return -EACCES;
1396 
1397 	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
1398 		attr->ia_valid |= ATTR_FORCE;
1399 
1400 	err = inode_change_ok(inode, attr);
1401 	if (err)
1402 		return err;
1403 
1404 	if (attr->ia_valid & ATTR_OPEN) {
1405 		if (fc->atomic_o_trunc)
1406 			return 0;
1407 		file = NULL;
1408 	}
1409 
1410 	if (attr->ia_valid & ATTR_SIZE)
1411 		is_truncate = true;
1412 
1413 	req = fuse_get_req(fc);
1414 	if (IS_ERR(req))
1415 		return PTR_ERR(req);
1416 
1417 	if (is_truncate)
1418 		fuse_set_nowrite(inode);
1419 
1420 	memset(&inarg, 0, sizeof(inarg));
1421 	memset(&outarg, 0, sizeof(outarg));
1422 	iattr_to_fattr(attr, &inarg);
1423 	if (file) {
1424 		struct fuse_file *ff = file->private_data;
1425 		inarg.valid |= FATTR_FH;
1426 		inarg.fh = ff->fh;
1427 	}
1428 	if (attr->ia_valid & ATTR_SIZE) {
1429 		/* For mandatory locking in truncate */
1430 		inarg.valid |= FATTR_LOCKOWNER;
1431 		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
1432 	}
1433 	req->in.h.opcode = FUSE_SETATTR;
1434 	req->in.h.nodeid = get_node_id(inode);
1435 	req->in.numargs = 1;
1436 	req->in.args[0].size = sizeof(inarg);
1437 	req->in.args[0].value = &inarg;
1438 	req->out.numargs = 1;
1439 	if (fc->minor < 9)
1440 		req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
1441 	else
1442 		req->out.args[0].size = sizeof(outarg);
1443 	req->out.args[0].value = &outarg;
1444 	fuse_request_send(fc, req);
1445 	err = req->out.h.error;
1446 	fuse_put_request(fc, req);
1447 	if (err) {
1448 		if (err == -EINTR)
1449 			fuse_invalidate_attr(inode);
1450 		goto error;
1451 	}
1452 
1453 	if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
1454 		make_bad_inode(inode);
1455 		err = -EIO;
1456 		goto error;
1457 	}
1458 
1459 	spin_lock(&fc->lock);
1460 	fuse_change_attributes_common(inode, &outarg.attr,
1461 				      attr_timeout(&outarg));
1462 	oldsize = inode->i_size;
1463 	i_size_write(inode, outarg.attr.size);
1464 
1465 	if (is_truncate) {
1466 		/* NOTE: this may release/reacquire fc->lock */
1467 		__fuse_release_nowrite(inode);
1468 	}
1469 	spin_unlock(&fc->lock);
1470 
1471 	/*
1472 	 * Only call invalidate_inode_pages2() after removing
1473 	 * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
1474 	 */
1475 	if (S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
1476 		truncate_pagecache(inode, oldsize, outarg.attr.size);
1477 		invalidate_inode_pages2(inode->i_mapping);
1478 	}
1479 
1480 	return 0;
1481 
1482 error:
1483 	if (is_truncate)
1484 		fuse_release_nowrite(inode);
1485 
1486 	return err;
1487 }
1488 
1489 static int fuse_setattr(struct dentry *entry, struct iattr *attr)
1490 {
1491 	if (attr->ia_valid & ATTR_FILE)
1492 		return fuse_do_setattr(entry, attr, attr->ia_file);
1493 	else
1494 		return fuse_do_setattr(entry, attr, NULL);
1495 }
1496 
1497 static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
1498 			struct kstat *stat)
1499 {
1500 	struct inode *inode = entry->d_inode;
1501 	struct fuse_conn *fc = get_fuse_conn(inode);
1502 
1503 	if (!fuse_allow_task(fc, current))
1504 		return -EACCES;
1505 
1506 	return fuse_update_attributes(inode, stat, NULL, NULL);
1507 }
1508 
1509 static int fuse_setxattr(struct dentry *entry, const char *name,
1510 			 const void *value, size_t size, int flags)
1511 {
1512 	struct inode *inode = entry->d_inode;
1513 	struct fuse_conn *fc = get_fuse_conn(inode);
1514 	struct fuse_req *req;
1515 	struct fuse_setxattr_in inarg;
1516 	int err;
1517 
1518 	if (fc->no_setxattr)
1519 		return -EOPNOTSUPP;
1520 
1521 	req = fuse_get_req(fc);
1522 	if (IS_ERR(req))
1523 		return PTR_ERR(req);
1524 
1525 	memset(&inarg, 0, sizeof(inarg));
1526 	inarg.size = size;
1527 	inarg.flags = flags;
1528 	req->in.h.opcode = FUSE_SETXATTR;
1529 	req->in.h.nodeid = get_node_id(inode);
1530 	req->in.numargs = 3;
1531 	req->in.args[0].size = sizeof(inarg);
1532 	req->in.args[0].value = &inarg;
1533 	req->in.args[1].size = strlen(name) + 1;
1534 	req->in.args[1].value = name;
1535 	req->in.args[2].size = size;
1536 	req->in.args[2].value = value;
1537 	fuse_request_send(fc, req);
1538 	err = req->out.h.error;
1539 	fuse_put_request(fc, req);
1540 	if (err == -ENOSYS) {
1541 		fc->no_setxattr = 1;
1542 		err = -EOPNOTSUPP;
1543 	}
1544 	return err;
1545 }
1546 
1547 static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
1548 			     void *value, size_t size)
1549 {
1550 	struct inode *inode = entry->d_inode;
1551 	struct fuse_conn *fc = get_fuse_conn(inode);
1552 	struct fuse_req *req;
1553 	struct fuse_getxattr_in inarg;
1554 	struct fuse_getxattr_out outarg;
1555 	ssize_t ret;
1556 
1557 	if (fc->no_getxattr)
1558 		return -EOPNOTSUPP;
1559 
1560 	req = fuse_get_req(fc);
1561 	if (IS_ERR(req))
1562 		return PTR_ERR(req);
1563 
1564 	memset(&inarg, 0, sizeof(inarg));
1565 	inarg.size = size;
1566 	req->in.h.opcode = FUSE_GETXATTR;
1567 	req->in.h.nodeid = get_node_id(inode);
1568 	req->in.numargs = 2;
1569 	req->in.args[0].size = sizeof(inarg);
1570 	req->in.args[0].value = &inarg;
1571 	req->in.args[1].size = strlen(name) + 1;
1572 	req->in.args[1].value = name;
1573 	/* This is really two different operations rolled into one */
1574 	req->out.numargs = 1;
1575 	if (size) {
1576 		req->out.argvar = 1;
1577 		req->out.args[0].size = size;
1578 		req->out.args[0].value = value;
1579 	} else {
1580 		req->out.args[0].size = sizeof(outarg);
1581 		req->out.args[0].value = &outarg;
1582 	}
1583 	fuse_request_send(fc, req);
1584 	ret = req->out.h.error;
1585 	if (!ret)
1586 		ret = size ? req->out.args[0].size : outarg.size;
1587 	else {
1588 		if (ret == -ENOSYS) {
1589 			fc->no_getxattr = 1;
1590 			ret = -EOPNOTSUPP;
1591 		}
1592 	}
1593 	fuse_put_request(fc, req);
1594 	return ret;
1595 }
1596 
1597 static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
1598 {
1599 	struct inode *inode = entry->d_inode;
1600 	struct fuse_conn *fc = get_fuse_conn(inode);
1601 	struct fuse_req *req;
1602 	struct fuse_getxattr_in inarg;
1603 	struct fuse_getxattr_out outarg;
1604 	ssize_t ret;
1605 
1606 	if (!fuse_allow_task(fc, current))
1607 		return -EACCES;
1608 
1609 	if (fc->no_listxattr)
1610 		return -EOPNOTSUPP;
1611 
1612 	req = fuse_get_req(fc);
1613 	if (IS_ERR(req))
1614 		return PTR_ERR(req);
1615 
1616 	memset(&inarg, 0, sizeof(inarg));
1617 	inarg.size = size;
1618 	req->in.h.opcode = FUSE_LISTXATTR;
1619 	req->in.h.nodeid = get_node_id(inode);
1620 	req->in.numargs = 1;
1621 	req->in.args[0].size = sizeof(inarg);
1622 	req->in.args[0].value = &inarg;
1623 	/* This is really two different operations rolled into one */
1624 	req->out.numargs = 1;
1625 	if (size) {
1626 		req->out.argvar = 1;
1627 		req->out.args[0].size = size;
1628 		req->out.args[0].value = list;
1629 	} else {
1630 		req->out.args[0].size = sizeof(outarg);
1631 		req->out.args[0].value = &outarg;
1632 	}
1633 	fuse_request_send(fc, req);
1634 	ret = req->out.h.error;
1635 	if (!ret)
1636 		ret = size ? req->out.args[0].size : outarg.size;
1637 	else {
1638 		if (ret == -ENOSYS) {
1639 			fc->no_listxattr = 1;
1640 			ret = -EOPNOTSUPP;
1641 		}
1642 	}
1643 	fuse_put_request(fc, req);
1644 	return ret;
1645 }
1646 
1647 static int fuse_removexattr(struct dentry *entry, const char *name)
1648 {
1649 	struct inode *inode = entry->d_inode;
1650 	struct fuse_conn *fc = get_fuse_conn(inode);
1651 	struct fuse_req *req;
1652 	int err;
1653 
1654 	if (fc->no_removexattr)
1655 		return -EOPNOTSUPP;
1656 
1657 	req = fuse_get_req(fc);
1658 	if (IS_ERR(req))
1659 		return PTR_ERR(req);
1660 
1661 	req->in.h.opcode = FUSE_REMOVEXATTR;
1662 	req->in.h.nodeid = get_node_id(inode);
1663 	req->in.numargs = 1;
1664 	req->in.args[0].size = strlen(name) + 1;
1665 	req->in.args[0].value = name;
1666 	fuse_request_send(fc, req);
1667 	err = req->out.h.error;
1668 	fuse_put_request(fc, req);
1669 	if (err == -ENOSYS) {
1670 		fc->no_removexattr = 1;
1671 		err = -EOPNOTSUPP;
1672 	}
1673 	return err;
1674 }
1675 
1676 static const struct inode_operations fuse_dir_inode_operations = {
1677 	.lookup		= fuse_lookup,
1678 	.mkdir		= fuse_mkdir,
1679 	.symlink	= fuse_symlink,
1680 	.unlink		= fuse_unlink,
1681 	.rmdir		= fuse_rmdir,
1682 	.rename		= fuse_rename,
1683 	.link		= fuse_link,
1684 	.setattr	= fuse_setattr,
1685 	.create		= fuse_create,
1686 	.atomic_open	= fuse_atomic_open,
1687 	.mknod		= fuse_mknod,
1688 	.permission	= fuse_permission,
1689 	.getattr	= fuse_getattr,
1690 	.setxattr	= fuse_setxattr,
1691 	.getxattr	= fuse_getxattr,
1692 	.listxattr	= fuse_listxattr,
1693 	.removexattr	= fuse_removexattr,
1694 };
1695 
1696 static const struct file_operations fuse_dir_operations = {
1697 	.llseek		= generic_file_llseek,
1698 	.read		= generic_read_dir,
1699 	.readdir	= fuse_readdir,
1700 	.open		= fuse_dir_open,
1701 	.release	= fuse_dir_release,
1702 	.fsync		= fuse_dir_fsync,
1703 	.unlocked_ioctl	= fuse_dir_ioctl,
1704 	.compat_ioctl	= fuse_dir_compat_ioctl,
1705 };
1706 
1707 static const struct inode_operations fuse_common_inode_operations = {
1708 	.setattr	= fuse_setattr,
1709 	.permission	= fuse_permission,
1710 	.getattr	= fuse_getattr,
1711 	.setxattr	= fuse_setxattr,
1712 	.getxattr	= fuse_getxattr,
1713 	.listxattr	= fuse_listxattr,
1714 	.removexattr	= fuse_removexattr,
1715 };
1716 
1717 static const struct inode_operations fuse_symlink_inode_operations = {
1718 	.setattr	= fuse_setattr,
1719 	.follow_link	= fuse_follow_link,
1720 	.put_link	= fuse_put_link,
1721 	.readlink	= generic_readlink,
1722 	.getattr	= fuse_getattr,
1723 	.setxattr	= fuse_setxattr,
1724 	.getxattr	= fuse_getxattr,
1725 	.listxattr	= fuse_listxattr,
1726 	.removexattr	= fuse_removexattr,
1727 };
1728 
1729 void fuse_init_common(struct inode *inode)
1730 {
1731 	inode->i_op = &fuse_common_inode_operations;
1732 }
1733 
1734 void fuse_init_dir(struct inode *inode)
1735 {
1736 	inode->i_op = &fuse_dir_inode_operations;
1737 	inode->i_fop = &fuse_dir_operations;
1738 }
1739 
1740 void fuse_init_symlink(struct inode *inode)
1741 {
1742 	inode->i_op = &fuse_symlink_inode_operations;
1743 }
1744