xref: /openbmc/linux/fs/fuse/file.c (revision 5c7587f6)
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/sched/signal.h>
16 #include <linux/module.h>
17 #include <linux/swap.h>
18 #include <linux/falloc.h>
19 #include <linux/uio.h>
20 #include <linux/fs.h>
21 #include <linux/filelock.h>
22 
23 static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
24 			  unsigned int open_flags, int opcode,
25 			  struct fuse_open_out *outargp)
26 {
27 	struct fuse_open_in inarg;
28 	FUSE_ARGS(args);
29 
30 	memset(&inarg, 0, sizeof(inarg));
31 	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
32 	if (!fm->fc->atomic_o_trunc)
33 		inarg.flags &= ~O_TRUNC;
34 
35 	if (fm->fc->handle_killpriv_v2 &&
36 	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
37 		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
38 	}
39 
40 	args.opcode = opcode;
41 	args.nodeid = nodeid;
42 	args.in_numargs = 1;
43 	args.in_args[0].size = sizeof(inarg);
44 	args.in_args[0].value = &inarg;
45 	args.out_numargs = 1;
46 	args.out_args[0].size = sizeof(*outargp);
47 	args.out_args[0].value = outargp;
48 
49 	return fuse_simple_request(fm, &args);
50 }
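
/*
 * Illustrative sketch (guarded out, not built): the userspace half of
 * the exchange above.  FUSE_OPEN/FUSE_OPENDIR deliver a struct
 * fuse_open_in and expect a struct fuse_open_out back; both are in the
 * uapi <linux/fuse.h>.  The handler below is hypothetical.
 */
#if 0
#include <linux/fuse.h>

static void example_handle_open(const struct fuse_open_in *in,
				struct fuse_open_out *out)
{
	/* in->flags carries the open(2) flags, already stripped of
	   O_CREAT | O_EXCL | O_NOCTTY by fuse_send_open() */
	out->fh = 42;				/* server-chosen handle */
	out->open_flags = FOPEN_KEEP_CACHE;	/* honored in fuse_finish_open() */
}
#endif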
51 
52 struct fuse_release_args {
53 	struct fuse_args args;
54 	struct fuse_release_in inarg;
55 	struct inode *inode;
56 };
57 
58 struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
59 {
60 	struct fuse_file *ff;
61 
62 	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
63 	if (unlikely(!ff))
64 		return NULL;
65 
66 	ff->fm = fm;
67 	ff->release_args = kzalloc(sizeof(*ff->release_args),
68 				   GFP_KERNEL_ACCOUNT);
69 	if (!ff->release_args) {
70 		kfree(ff);
71 		return NULL;
72 	}
73 
74 	INIT_LIST_HEAD(&ff->write_entry);
75 	mutex_init(&ff->readdir.lock);
76 	refcount_set(&ff->count, 1);
77 	RB_CLEAR_NODE(&ff->polled_node);
78 	init_waitqueue_head(&ff->poll_wait);
79 
80 	ff->kh = atomic64_inc_return(&fm->fc->khctr);
81 
82 	return ff;
83 }
84 
85 void fuse_file_free(struct fuse_file *ff)
86 {
87 	kfree(ff->release_args);
88 	mutex_destroy(&ff->readdir.lock);
89 	kfree(ff);
90 }
91 
92 static struct fuse_file *fuse_file_get(struct fuse_file *ff)
93 {
94 	refcount_inc(&ff->count);
95 	return ff;
96 }
97 
98 static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
99 			     int error)
100 {
101 	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);
102 
103 	iput(ra->inode);
104 	kfree(ra);
105 }
106 
107 static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
108 {
109 	if (refcount_dec_and_test(&ff->count)) {
110 		struct fuse_args *args = &ff->release_args->args;
111 
112 		if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
113 			/* Do nothing when client does not implement 'open' */
114 			fuse_release_end(ff->fm, args, 0);
115 		} else if (sync) {
116 			fuse_simple_request(ff->fm, args);
117 			fuse_release_end(ff->fm, args, 0);
118 		} else {
119 			args->end = fuse_release_end;
120 			if (fuse_simple_background(ff->fm, args,
121 						   GFP_KERNEL | __GFP_NOFAIL))
122 				fuse_release_end(ff->fm, args, -ENOTCONN);
123 		}
124 		kfree(ff);
125 	}
126 }
127 
128 struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
129 				 unsigned int open_flags, bool isdir)
130 {
131 	struct fuse_conn *fc = fm->fc;
132 	struct fuse_file *ff;
133 	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
134 
135 	ff = fuse_file_alloc(fm);
136 	if (!ff)
137 		return ERR_PTR(-ENOMEM);
138 
139 	ff->fh = 0;
140 	/* Default for no-open */
141 	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
142 	if (isdir ? !fc->no_opendir : !fc->no_open) {
143 		struct fuse_open_out outarg;
144 		int err;
145 
146 		err = fuse_send_open(fm, nodeid, open_flags, opcode, &outarg);
147 		if (!err) {
148 			ff->fh = outarg.fh;
149 			ff->open_flags = outarg.open_flags;
150 
151 		} else if (err != -ENOSYS) {
152 			fuse_file_free(ff);
153 			return ERR_PTR(err);
154 		} else {
155 			if (isdir)
156 				fc->no_opendir = 1;
157 			else
158 				fc->no_open = 1;
159 		}
160 	}
161 
162 	if (isdir)
163 		ff->open_flags &= ~FOPEN_DIRECT_IO;
164 
165 	ff->nodeid = nodeid;
166 
167 	return ff;
168 }
169 
170 int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
171 		 bool isdir)
172 {
173 	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);
174 
175 	if (!IS_ERR(ff))
176 		file->private_data = ff;
177 
178 	return PTR_ERR_OR_ZERO(ff);
179 }
180 EXPORT_SYMBOL_GPL(fuse_do_open);
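
/*
 * Illustrative sketch (guarded out, not built): fuse_do_open() is
 * exported so users outside fs/fuse (e.g. CUSE) can attach a fuse_file
 * to a struct file.  A minimal caller, mirroring fuse_open_common():
 */
#if 0
static int example_open(struct inode *inode, struct file *file)
{
	struct fuse_mount *fm = get_fuse_mount(inode);

	/* on success, file->private_data points at the new fuse_file */
	return fuse_do_open(fm, get_node_id(inode), file, false);
}
#endif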
181 
182 static void fuse_link_write_file(struct file *file)
183 {
184 	struct inode *inode = file_inode(file);
185 	struct fuse_inode *fi = get_fuse_inode(inode);
186 	struct fuse_file *ff = file->private_data;
187 	/*
188 	 * file may be written through mmap, so chain it onto the
189 	 * inode's write_files list
190 	 */
191 	spin_lock(&fi->lock);
192 	if (list_empty(&ff->write_entry))
193 		list_add(&ff->write_entry, &fi->write_files);
194 	spin_unlock(&fi->lock);
195 }
196 
197 void fuse_finish_open(struct inode *inode, struct file *file)
198 {
199 	struct fuse_file *ff = file->private_data;
200 	struct fuse_conn *fc = get_fuse_conn(inode);
201 
202 	if (ff->open_flags & FOPEN_STREAM)
203 		stream_open(inode, file);
204 	else if (ff->open_flags & FOPEN_NONSEEKABLE)
205 		nonseekable_open(inode, file);
206 
207 	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
208 		struct fuse_inode *fi = get_fuse_inode(inode);
209 
210 		spin_lock(&fi->lock);
211 		fi->attr_version = atomic64_inc_return(&fc->attr_version);
212 		i_size_write(inode, 0);
213 		spin_unlock(&fi->lock);
214 		file_update_time(file);
215 		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
216 	}
217 	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
218 		fuse_link_write_file(file);
219 }
220 
221 int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
222 {
223 	struct fuse_mount *fm = get_fuse_mount(inode);
224 	struct fuse_conn *fc = fm->fc;
225 	int err;
226 	bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
227 			  fc->atomic_o_trunc &&
228 			  fc->writeback_cache;
229 	bool dax_truncate = (file->f_flags & O_TRUNC) &&
230 			  fc->atomic_o_trunc && FUSE_IS_DAX(inode);
231 
232 	if (fuse_is_bad(inode))
233 		return -EIO;
234 
235 	err = generic_file_open(inode, file);
236 	if (err)
237 		return err;
238 
239 	if (is_wb_truncate || dax_truncate)
240 		inode_lock(inode);
241 
242 	if (dax_truncate) {
243 		filemap_invalidate_lock(inode->i_mapping);
244 		err = fuse_dax_break_layouts(inode, 0, 0);
245 		if (err)
246 			goto out_inode_unlock;
247 	}
248 
249 	if (is_wb_truncate || dax_truncate)
250 		fuse_set_nowrite(inode);
251 
252 	err = fuse_do_open(fm, get_node_id(inode), file, isdir);
253 	if (!err)
254 		fuse_finish_open(inode, file);
255 
256 	if (is_wb_truncate || dax_truncate)
257 		fuse_release_nowrite(inode);
258 	if (!err) {
259 		struct fuse_file *ff = file->private_data;
260 
261 		if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC))
262 			truncate_pagecache(inode, 0);
263 		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
264 			invalidate_inode_pages2(inode->i_mapping);
265 	}
266 	if (dax_truncate)
267 		filemap_invalidate_unlock(inode->i_mapping);
268 out_inode_unlock:
269 	if (is_wb_truncate || dax_truncate)
270 		inode_unlock(inode);
271 
272 	return err;
273 }
274 
275 static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
276 				 unsigned int flags, int opcode)
277 {
278 	struct fuse_conn *fc = ff->fm->fc;
279 	struct fuse_release_args *ra = ff->release_args;
280 
281 	/* Inode is NULL on error path of fuse_create_open() */
282 	if (likely(fi)) {
283 		spin_lock(&fi->lock);
284 		list_del(&ff->write_entry);
285 		spin_unlock(&fi->lock);
286 	}
287 	spin_lock(&fc->lock);
288 	if (!RB_EMPTY_NODE(&ff->polled_node))
289 		rb_erase(&ff->polled_node, &fc->polled_files);
290 	spin_unlock(&fc->lock);
291 
292 	wake_up_interruptible_all(&ff->poll_wait);
293 
294 	ra->inarg.fh = ff->fh;
295 	ra->inarg.flags = flags;
296 	ra->args.in_numargs = 1;
297 	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
298 	ra->args.in_args[0].value = &ra->inarg;
299 	ra->args.opcode = opcode;
300 	ra->args.nodeid = ff->nodeid;
301 	ra->args.force = true;
302 	ra->args.nocreds = true;
303 }
304 
305 void fuse_file_release(struct inode *inode, struct fuse_file *ff,
306 		       unsigned int open_flags, fl_owner_t id, bool isdir)
307 {
308 	struct fuse_inode *fi = get_fuse_inode(inode);
309 	struct fuse_release_args *ra = ff->release_args;
310 	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
311 
312 	fuse_prepare_release(fi, ff, open_flags, opcode);
313 
314 	if (ff->flock) {
315 		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
316 		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
317 	}
318 	/* Hold inode until release is finished */
319 	ra->inode = igrab(inode);
320 
321 	/*
322 	 * Normally this will send the RELEASE request; however, if
323 	 * some asynchronous READ or WRITE requests are outstanding,
324 	 * the sending will be delayed.
325 	 *
326 	 * Make the release synchronous if this is a fuseblk mount;
327 	 * synchronous RELEASE is allowed (and desirable) in this case
328 	 * because the server can be trusted not to screw up.
329 	 */
330 	fuse_file_put(ff, ff->fm->fc->destroy, isdir);
331 }
332 
333 void fuse_release_common(struct file *file, bool isdir)
334 {
335 	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
336 			  (fl_owner_t) file, isdir);
337 }
338 
339 static int fuse_open(struct inode *inode, struct file *file)
340 {
341 	return fuse_open_common(inode, file, false);
342 }
343 
344 static int fuse_release(struct inode *inode, struct file *file)
345 {
346 	struct fuse_conn *fc = get_fuse_conn(inode);
347 
348 	/*
349 	 * Dirty pages might remain despite the write_inode_now() call from
350 	 * fuse_flush() due to writes racing with the close.
351 	 */
352 	if (fc->writeback_cache)
353 		write_inode_now(inode, 1);
354 
355 	fuse_release_common(file, false);
356 
357 	/* return value is ignored by VFS */
358 	return 0;
359 }
360 
361 void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
362 		       unsigned int flags)
363 {
364 	WARN_ON(refcount_read(&ff->count) > 1);
365 	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
366 	/*
367 	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
368 	 * synchronous, we are fine with not doing igrab() here
369 	 */
370 	fuse_file_put(ff, true, false);
371 }
372 EXPORT_SYMBOL_GPL(fuse_sync_release);
373 
374 /*
375  * Scramble the ID space with XTEA, so that the value of the files_struct
376  * pointer is not exposed to userspace.
377  */
378 u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
379 {
380 	u32 *k = fc->scramble_key;
381 	u64 v = (unsigned long) id;
382 	u32 v0 = v;
383 	u32 v1 = v >> 32;
384 	u32 sum = 0;
385 	int i;
386 
387 	for (i = 0; i < 32; i++) {
388 		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
389 		sum += 0x9E3779B9;
390 		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
391 	}
392 
393 	return (u64) v0 + ((u64) v1 << 32);
394 }
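
/*
 * Illustrative sketch (guarded out, not built): the loop above is the
 * standard 32-round XTEA encipher, keyed by the 128-bit scramble_key.
 * A userspace replica showing that equal owners always map to the same
 * cookie (the key values below are made up):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t xtea_scramble(const uint32_t k[4], uint64_t v)
{
	uint32_t v0 = v, v1 = v >> 32, sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum >> 11 & 3]);
	}
	return (uint64_t)v0 + ((uint64_t)v1 << 32);
}

int main(void)
{
	const uint32_t key[4] = { 0x1, 0x2, 0x3, 0x4 };

	/* same input, same key -> same scrambled id */
	printf("%llx\n", (unsigned long long)xtea_scramble(key, 0xdeadbeefULL));
	printf("%llx\n", (unsigned long long)xtea_scramble(key, 0xdeadbeefULL));
	return 0;
}
#endif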
395 
396 struct fuse_writepage_args {
397 	struct fuse_io_args ia;
398 	struct rb_node writepages_entry;
399 	struct list_head queue_entry;
400 	struct fuse_writepage_args *next;
401 	struct inode *inode;
402 	struct fuse_sync_bucket *bucket;
403 };
404 
405 static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
406 					    pgoff_t idx_from, pgoff_t idx_to)
407 {
408 	struct rb_node *n;
409 
410 	n = fi->writepages.rb_node;
411 
412 	while (n) {
413 		struct fuse_writepage_args *wpa;
414 		pgoff_t curr_index;
415 
416 		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
417 		WARN_ON(get_fuse_inode(wpa->inode) != fi);
418 		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
419 		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
420 			n = n->rb_right;
421 		else if (idx_to < curr_index)
422 			n = n->rb_left;
423 		else
424 			return wpa;
425 	}
426 	return NULL;
427 }
428 
429 /*
430  * Check if any page in a range is under writeback
431  *
432  * This is currently done by walking the list of writepage requests
433  * for the inode, which can be pretty inefficient.
434  */
435 static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
436 				   pgoff_t idx_to)
437 {
438 	struct fuse_inode *fi = get_fuse_inode(inode);
439 	bool found;
440 
441 	spin_lock(&fi->lock);
442 	found = fuse_find_writeback(fi, idx_from, idx_to);
443 	spin_unlock(&fi->lock);
444 
445 	return found;
446 }
447 
448 static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
449 {
450 	return fuse_range_is_writeback(inode, index, index);
451 }
452 
453 /*
454  * Wait for page writeback to be completed.
455  *
456  * Since fuse doesn't rely on the VM writeback tracking, this has to
457  * use some other means.
458  */
459 static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
460 {
461 	struct fuse_inode *fi = get_fuse_inode(inode);
462 
463 	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
464 }
465 
466 /*
467  * Wait for all pending writepages on the inode to finish.
468  *
469  * This is currently done by blocking further writes with FUSE_NOWRITE
470  * and waiting for all sent writes to complete.
471  *
472  * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
473  * could conflict with truncation.
474  */
475 static void fuse_sync_writes(struct inode *inode)
476 {
477 	fuse_set_nowrite(inode);
478 	fuse_release_nowrite(inode);
479 }
480 
481 static int fuse_flush(struct file *file, fl_owner_t id)
482 {
483 	struct inode *inode = file_inode(file);
484 	struct fuse_mount *fm = get_fuse_mount(inode);
485 	struct fuse_file *ff = file->private_data;
486 	struct fuse_flush_in inarg;
487 	FUSE_ARGS(args);
488 	int err;
489 
490 	if (fuse_is_bad(inode))
491 		return -EIO;
492 
493 	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
494 		return 0;
495 
496 	err = write_inode_now(inode, 1);
497 	if (err)
498 		return err;
499 
500 	inode_lock(inode);
501 	fuse_sync_writes(inode);
502 	inode_unlock(inode);
503 
504 	err = filemap_check_errors(file->f_mapping);
505 	if (err)
506 		return err;
507 
508 	err = 0;
509 	if (fm->fc->no_flush)
510 		goto inval_attr_out;
511 
512 	memset(&inarg, 0, sizeof(inarg));
513 	inarg.fh = ff->fh;
514 	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
515 	args.opcode = FUSE_FLUSH;
516 	args.nodeid = get_node_id(inode);
517 	args.in_numargs = 1;
518 	args.in_args[0].size = sizeof(inarg);
519 	args.in_args[0].value = &inarg;
520 	args.force = true;
521 
522 	err = fuse_simple_request(fm, &args);
523 	if (err == -ENOSYS) {
524 		fm->fc->no_flush = 1;
525 		err = 0;
526 	}
527 
528 inval_attr_out:
529 	/*
530 	 * In-memory i_blocks is not maintained by fuse; if writeback cache is
531 	 * enabled, i_blocks from cached attr may not be accurate.
532 	 */
533 	if (!err && fm->fc->writeback_cache)
534 		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
535 	return err;
536 }
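
/*
 * Illustrative sketch (guarded out, not built): the server side of the
 * request above.  struct fuse_flush_in is uapi; the handler name is
 * hypothetical.  Answering -ENOSYS once latches fc->no_flush, after
 * which the kernel stops sending FUSE_FLUSH entirely.
 */
#if 0
#include <errno.h>
#include <linux/fuse.h>

static int example_handle_flush(const struct fuse_flush_in *in)
{
	/* in->fh: handle from FUSE_OPEN; in->lock_owner: the scrambled
	   owner id produced by fuse_lock_owner_id() */
	(void)in;
	return 0;	/* or -ENOSYS if flush is meaningless for this fs */
}
#endif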
537 
538 int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
539 		      int datasync, int opcode)
540 {
541 	struct inode *inode = file->f_mapping->host;
542 	struct fuse_mount *fm = get_fuse_mount(inode);
543 	struct fuse_file *ff = file->private_data;
544 	FUSE_ARGS(args);
545 	struct fuse_fsync_in inarg;
546 
547 	memset(&inarg, 0, sizeof(inarg));
548 	inarg.fh = ff->fh;
549 	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
550 	args.opcode = opcode;
551 	args.nodeid = get_node_id(inode);
552 	args.in_numargs = 1;
553 	args.in_args[0].size = sizeof(inarg);
554 	args.in_args[0].value = &inarg;
555 	return fuse_simple_request(fm, &args);
556 }
557 
558 static int fuse_fsync(struct file *file, loff_t start, loff_t end,
559 		      int datasync)
560 {
561 	struct inode *inode = file->f_mapping->host;
562 	struct fuse_conn *fc = get_fuse_conn(inode);
563 	int err;
564 
565 	if (fuse_is_bad(inode))
566 		return -EIO;
567 
568 	inode_lock(inode);
569 
570 	/*
571 	 * Start writeback against all dirty pages of the inode, then
572 	 * wait for all outstanding writes, before sending the FSYNC
573 	 * request.
574 	 */
575 	err = file_write_and_wait_range(file, start, end);
576 	if (err)
577 		goto out;
578 
579 	fuse_sync_writes(inode);
580 
581 	/*
582 	 * Due to the implementation of fuse writeback,
583 	 * file_write_and_wait_range() does not catch errors.
584 	 * We have to do this directly after fuse_sync_writes().
585 	 */
586 	err = file_check_and_advance_wb_err(file);
587 	if (err)
588 		goto out;
589 
590 	err = sync_inode_metadata(inode, 1);
591 	if (err)
592 		goto out;
593 
594 	if (fc->no_fsync)
595 		goto out;
596 
597 	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
598 	if (err == -ENOSYS) {
599 		fc->no_fsync = 1;
600 		err = 0;
601 	}
602 out:
603 	inode_unlock(inode);
604 
605 	return err;
606 }
607 
608 void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
609 			 size_t count, int opcode)
610 {
611 	struct fuse_file *ff = file->private_data;
612 	struct fuse_args *args = &ia->ap.args;
613 
614 	ia->read.in.fh = ff->fh;
615 	ia->read.in.offset = pos;
616 	ia->read.in.size = count;
617 	ia->read.in.flags = file->f_flags;
618 	args->opcode = opcode;
619 	args->nodeid = ff->nodeid;
620 	args->in_numargs = 1;
621 	args->in_args[0].size = sizeof(ia->read.in);
622 	args->in_args[0].value = &ia->read.in;
623 	args->out_argvar = true;
624 	args->out_numargs = 1;
625 	args->out_args[0].size = count;
626 }
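
/*
 * Illustrative sketch (guarded out, not built): what the server does
 * with the arguments filled above.  Because out_argvar is set, the
 * reply payload may legally be shorter than in->size; the kernel
 * treats a short count as EOF (see fuse_short_read() below).
 */
#if 0
#include <linux/fuse.h>
#include <string.h>

/* hypothetical helper: serve a read from an in-memory backing buffer */
static size_t example_handle_read(const struct fuse_read_in *in,
				  const char *backing, size_t backing_len,
				  char *reply)
{
	size_t n = 0;

	if (in->offset < backing_len)
		n = backing_len - in->offset;
	if (n > in->size)
		n = in->size;
	memcpy(reply, backing + in->offset, n);
	return n;		/* n < in->size reads as EOF */
}
#endif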
627 
628 static void fuse_release_user_pages(struct fuse_args_pages *ap,
629 				    bool should_dirty)
630 {
631 	unsigned int i;
632 
633 	for (i = 0; i < ap->num_pages; i++) {
634 		if (should_dirty)
635 			set_page_dirty_lock(ap->pages[i]);
636 		put_page(ap->pages[i]);
637 	}
638 }
639 
640 static void fuse_io_release(struct kref *kref)
641 {
642 	kfree(container_of(kref, struct fuse_io_priv, refcnt));
643 }
644 
645 static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
646 {
647 	if (io->err)
648 		return io->err;
649 
650 	if (io->bytes >= 0 && io->write)
651 		return -EIO;
652 
653 	return io->bytes < 0 ? io->size : io->bytes;
654 }
655 
656 /*
657  * In case of a short read, the caller sets 'pos' to the position of the
658  * actual end of the fuse request within the IO request. Otherwise, if
659  * bytes_requested == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
660  *
661  * An example:
662  * User requested DIO read of 64K. It was split into two 32K fuse requests,
663  * both submitted asynchronously. The first of them was ACKed by userspace as
664  * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
665  * second request was ACKed as short, e.g. only 1K was read, resulting in
666  * pos == 33K.
667  *
668  * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
669  * will be equal to the length of the longest contiguous fragment of
670  * transferred data starting from the beginning of IO request.
671  */
672 static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
673 {
674 	int left;
675 
676 	spin_lock(&io->lock);
677 	if (err)
678 		io->err = io->err ? : err;
679 	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
680 		io->bytes = pos;
681 
682 	left = --io->reqs;
683 	if (!left && io->blocking)
684 		complete(io->done);
685 	spin_unlock(&io->lock);
686 
687 	if (!left && !io->blocking) {
688 		ssize_t res = fuse_get_res_by_io(io);
689 
690 		if (res >= 0) {
691 			struct inode *inode = file_inode(io->iocb->ki_filp);
692 			struct fuse_conn *fc = get_fuse_conn(inode);
693 			struct fuse_inode *fi = get_fuse_inode(inode);
694 
695 			spin_lock(&fi->lock);
696 			fi->attr_version = atomic64_inc_return(&fc->attr_version);
697 			spin_unlock(&fi->lock);
698 		}
699 
700 		io->iocb->ki_complete(io->iocb, res);
701 	}
702 
703 	kref_put(&io->refcnt, fuse_io_release);
704 }
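
/*
 * Illustrative sketch (guarded out, not built): replaying the example
 * from the comment above with the same min-of-positions rule.  Two
 * async 32K reads; the second is short at 1K, so 'bytes' settles on
 * 33K, the longest contiguous prefix actually transferred.
 */
#if 0
#include <assert.h>

struct demo_io { long bytes; };		/* stands in for fuse_io_priv */

static void demo_complete(struct demo_io *io, long pos)
{
	if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;
}

int main(void)
{
	struct demo_io io = { .bytes = -1 };

	demo_complete(&io, -1);			/* first 32K fully ACKed */
	demo_complete(&io, 32768 + 1024);	/* second short: 1K of 32K */
	assert(io.bytes == 33 * 1024);
	return 0;
}
#endif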
705 
706 static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
707 					  unsigned int npages)
708 {
709 	struct fuse_io_args *ia;
710 
711 	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
712 	if (ia) {
713 		ia->io = io;
714 		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
715 						&ia->ap.descs);
716 		if (!ia->ap.pages) {
717 			kfree(ia);
718 			ia = NULL;
719 		}
720 	}
721 	return ia;
722 }
723 
724 static void fuse_io_free(struct fuse_io_args *ia)
725 {
726 	kfree(ia->ap.pages);
727 	kfree(ia);
728 }
729 
730 static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
731 				  int err)
732 {
733 	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
734 	struct fuse_io_priv *io = ia->io;
735 	ssize_t pos = -1;
736 
737 	fuse_release_user_pages(&ia->ap, io->should_dirty);
738 
739 	if (err) {
740 		/* Nothing */
741 	} else if (io->write) {
742 		if (ia->write.out.size > ia->write.in.size) {
743 			err = -EIO;
744 		} else if (ia->write.in.size != ia->write.out.size) {
745 			pos = ia->write.in.offset - io->offset +
746 				ia->write.out.size;
747 		}
748 	} else {
749 		u32 outsize = args->out_args[0].size;
750 
751 		if (ia->read.in.size != outsize)
752 			pos = ia->read.in.offset - io->offset + outsize;
753 	}
754 
755 	fuse_aio_complete(io, err, pos);
756 	fuse_io_free(ia);
757 }
758 
759 static ssize_t fuse_async_req_send(struct fuse_mount *fm,
760 				   struct fuse_io_args *ia, size_t num_bytes)
761 {
762 	ssize_t err;
763 	struct fuse_io_priv *io = ia->io;
764 
765 	spin_lock(&io->lock);
766 	kref_get(&io->refcnt);
767 	io->size += num_bytes;
768 	io->reqs++;
769 	spin_unlock(&io->lock);
770 
771 	ia->ap.args.end = fuse_aio_complete_req;
772 	ia->ap.args.may_block = io->should_dirty;
773 	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
774 	if (err)
775 		fuse_aio_complete_req(fm, &ia->ap.args, err);
776 
777 	return num_bytes;
778 }
779 
780 static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
781 			      fl_owner_t owner)
782 {
783 	struct file *file = ia->io->iocb->ki_filp;
784 	struct fuse_file *ff = file->private_data;
785 	struct fuse_mount *fm = ff->fm;
786 
787 	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
788 	if (owner != NULL) {
789 		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
790 		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
791 	}
792 
793 	if (ia->io->async)
794 		return fuse_async_req_send(fm, ia, count);
795 
796 	return fuse_simple_request(fm, &ia->ap.args);
797 }
798 
799 static void fuse_read_update_size(struct inode *inode, loff_t size,
800 				  u64 attr_ver)
801 {
802 	struct fuse_conn *fc = get_fuse_conn(inode);
803 	struct fuse_inode *fi = get_fuse_inode(inode);
804 
805 	spin_lock(&fi->lock);
806 	if (attr_ver >= fi->attr_version && size < inode->i_size &&
807 	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
808 		fi->attr_version = atomic64_inc_return(&fc->attr_version);
809 		i_size_write(inode, size);
810 	}
811 	spin_unlock(&fi->lock);
812 }
813 
814 static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
815 			    struct fuse_args_pages *ap)
816 {
817 	struct fuse_conn *fc = get_fuse_conn(inode);
818 
819 	/*
820 	 * If writeback_cache is enabled, a short read means there's a hole in
821 	 * the file.  Some data after the hole is in page cache, but has not
822 	 * reached the client fs yet.  So the hole is not present there.
823 	 */
824 	if (!fc->writeback_cache) {
825 		loff_t pos = page_offset(ap->pages[0]) + num_read;
826 		fuse_read_update_size(inode, pos, attr_ver);
827 	}
828 }
829 
830 static int fuse_do_readpage(struct file *file, struct page *page)
831 {
832 	struct inode *inode = page->mapping->host;
833 	struct fuse_mount *fm = get_fuse_mount(inode);
834 	loff_t pos = page_offset(page);
835 	struct fuse_page_desc desc = { .length = PAGE_SIZE };
836 	struct fuse_io_args ia = {
837 		.ap.args.page_zeroing = true,
838 		.ap.args.out_pages = true,
839 		.ap.num_pages = 1,
840 		.ap.pages = &page,
841 		.ap.descs = &desc,
842 	};
843 	ssize_t res;
844 	u64 attr_ver;
845 
846 	/*
847 	 * Page writeback can extend beyond the lifetime of the
848 	 * page-cache page, so make sure we read a properly synced
849 	 * page.
850 	 */
851 	fuse_wait_on_page_writeback(inode, page->index);
852 
853 	attr_ver = fuse_get_attr_version(fm->fc);
854 
855 	/* Don't overflow end offset */
856 	if (pos + (desc.length - 1) == LLONG_MAX)
857 		desc.length--;
858 
859 	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
860 	res = fuse_simple_request(fm, &ia.ap.args);
861 	if (res < 0)
862 		return res;
863 	/*
864 	 * Short read means EOF.  If the file size is larger, truncate it.
865 	 */
866 	if (res < desc.length)
867 		fuse_short_read(inode, attr_ver, res, &ia.ap);
868 
869 	SetPageUptodate(page);
870 
871 	return 0;
872 }
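
/*
 * Illustrative sketch (guarded out, not built): the LLONG_MAX guard
 * above.  For a page whose last byte would sit exactly at the maximum
 * file offset, the request is shrunk by one byte so offset + size
 * cannot overflow the signed 64-bit end position.
 */
#if 0
#include <assert.h>
#include <limits.h>

int main(void)
{
	long long pos = LLONG_MAX - 4095;	/* page_offset() of last page */
	unsigned int length = 4096;

	if (pos + (length - 1) == LLONG_MAX)
		length--;
	assert(pos + (long long)length == LLONG_MAX);	/* end stays in range */
	return 0;
}
#endif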
873 
874 static int fuse_read_folio(struct file *file, struct folio *folio)
875 {
876 	struct page *page = &folio->page;
877 	struct inode *inode = page->mapping->host;
878 	int err;
879 
880 	err = -EIO;
881 	if (fuse_is_bad(inode))
882 		goto out;
883 
884 	err = fuse_do_readpage(file, page);
885 	fuse_invalidate_atime(inode);
886  out:
887 	unlock_page(page);
888 	return err;
889 }
890 
891 static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
892 			       int err)
893 {
894 	int i;
895 	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
896 	struct fuse_args_pages *ap = &ia->ap;
897 	size_t count = ia->read.in.size;
898 	size_t num_read = args->out_args[0].size;
899 	struct address_space *mapping = NULL;
900 
901 	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
902 		mapping = ap->pages[i]->mapping;
903 
904 	if (mapping) {
905 		struct inode *inode = mapping->host;
906 
907 		/*
908 		 * Short read means EOF. If the file size is larger, truncate it.
909 		 */
910 		if (!err && num_read < count)
911 			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
912 
913 		fuse_invalidate_atime(inode);
914 	}
915 
916 	for (i = 0; i < ap->num_pages; i++) {
917 		struct page *page = ap->pages[i];
918 
919 		if (!err)
920 			SetPageUptodate(page);
921 		else
922 			SetPageError(page);
923 		unlock_page(page);
924 		put_page(page);
925 	}
926 	if (ia->ff)
927 		fuse_file_put(ia->ff, false, false);
928 
929 	fuse_io_free(ia);
930 }
931 
932 static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
933 {
934 	struct fuse_file *ff = file->private_data;
935 	struct fuse_mount *fm = ff->fm;
936 	struct fuse_args_pages *ap = &ia->ap;
937 	loff_t pos = page_offset(ap->pages[0]);
938 	size_t count = ap->num_pages << PAGE_SHIFT;
939 	ssize_t res;
940 	int err;
941 
942 	ap->args.out_pages = true;
943 	ap->args.page_zeroing = true;
944 	ap->args.page_replace = true;
945 
946 	/* Don't overflow end offset */
947 	if (pos + (count - 1) == LLONG_MAX) {
948 		count--;
949 		ap->descs[ap->num_pages - 1].length--;
950 	}
951 	WARN_ON((loff_t) (pos + count) < 0);
952 
953 	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
954 	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
955 	if (fm->fc->async_read) {
956 		ia->ff = fuse_file_get(ff);
957 		ap->args.end = fuse_readpages_end;
958 		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
959 		if (!err)
960 			return;
961 	} else {
962 		res = fuse_simple_request(fm, &ap->args);
963 		err = res < 0 ? res : 0;
964 	}
965 	fuse_readpages_end(fm, &ap->args, err);
966 }
967 
968 static void fuse_readahead(struct readahead_control *rac)
969 {
970 	struct inode *inode = rac->mapping->host;
971 	struct fuse_conn *fc = get_fuse_conn(inode);
972 	unsigned int i, max_pages, nr_pages = 0;
973 
974 	if (fuse_is_bad(inode))
975 		return;
976 
977 	max_pages = min_t(unsigned int, fc->max_pages,
978 			fc->max_read / PAGE_SIZE);
979 
980 	for (;;) {
981 		struct fuse_io_args *ia;
982 		struct fuse_args_pages *ap;
983 
984 		if (fc->num_background >= fc->congestion_threshold &&
985 		    rac->ra->async_size >= readahead_count(rac))
986 			/*
987 			 * Congested and only async pages left, so skip the
988 			 * rest.
989 			 */
990 			break;
991 
992 		nr_pages = readahead_count(rac) - nr_pages;
993 		if (nr_pages > max_pages)
994 			nr_pages = max_pages;
995 		if (nr_pages == 0)
996 			break;
997 		ia = fuse_io_alloc(NULL, nr_pages);
998 		if (!ia)
999 			return;
1000 		ap = &ia->ap;
1001 		nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
1002 		for (i = 0; i < nr_pages; i++) {
1003 			fuse_wait_on_page_writeback(inode,
1004 						    readahead_index(rac) + i);
1005 			ap->descs[i].length = PAGE_SIZE;
1006 		}
1007 		ap->num_pages = nr_pages;
1008 		fuse_send_readpages(ia, rac->file);
1009 	}
1010 }
1011 
1012 static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
1013 {
1014 	struct inode *inode = iocb->ki_filp->f_mapping->host;
1015 	struct fuse_conn *fc = get_fuse_conn(inode);
1016 
1017 	/*
1018 	 * In auto invalidate mode, always update attributes on read.
1019 	 * Otherwise, only update if we attempt to read past EOF (to ensure
1020 	 * i_size is up to date).
1021 	 */
1022 	if (fc->auto_inval_data ||
1023 	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
1024 		int err;
1025 		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
1026 		if (err)
1027 			return err;
1028 	}
1029 
1030 	return generic_file_read_iter(iocb, to);
1031 }
1032 
1033 static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
1034 				 loff_t pos, size_t count)
1035 {
1036 	struct fuse_args *args = &ia->ap.args;
1037 
1038 	ia->write.in.fh = ff->fh;
1039 	ia->write.in.offset = pos;
1040 	ia->write.in.size = count;
1041 	args->opcode = FUSE_WRITE;
1042 	args->nodeid = ff->nodeid;
1043 	args->in_numargs = 2;
1044 	if (ff->fm->fc->minor < 9)
1045 		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
1046 	else
1047 		args->in_args[0].size = sizeof(ia->write.in);
1048 	args->in_args[0].value = &ia->write.in;
1049 	args->in_args[1].size = count;
1050 	args->out_numargs = 1;
1051 	args->out_args[0].size = sizeof(ia->write.out);
1052 	args->out_args[0].value = &ia->write.out;
1053 }
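
/*
 * Illustrative sketch (guarded out, not built): why the minor < 9
 * branch above sends a shorter first argument.  Protocol 7.8 and older
 * only knew the first four fields of struct fuse_write_in (fh, offset,
 * size, write_flags), i.e. 24 bytes; lock_owner and flags came later.
 */
#if 0
#include <linux/fuse.h>
#include <stddef.h>

_Static_assert(FUSE_COMPAT_WRITE_IN_SIZE == 24,
	       "fh(8) + offset(8) + size(4) + write_flags(4)");
_Static_assert(offsetof(struct fuse_write_in, lock_owner) ==
	       FUSE_COMPAT_WRITE_IN_SIZE,
	       "later fields start where the compat struct ended");
#endif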
1054 
1055 static unsigned int fuse_write_flags(struct kiocb *iocb)
1056 {
1057 	unsigned int flags = iocb->ki_filp->f_flags;
1058 
1059 	if (iocb_is_dsync(iocb))
1060 		flags |= O_DSYNC;
1061 	if (iocb->ki_flags & IOCB_SYNC)
1062 		flags |= O_SYNC;
1063 
1064 	return flags;
1065 }
1066 
1067 static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
1068 			       size_t count, fl_owner_t owner)
1069 {
1070 	struct kiocb *iocb = ia->io->iocb;
1071 	struct file *file = iocb->ki_filp;
1072 	struct fuse_file *ff = file->private_data;
1073 	struct fuse_mount *fm = ff->fm;
1074 	struct fuse_write_in *inarg = &ia->write.in;
1075 	ssize_t err;
1076 
1077 	fuse_write_args_fill(ia, ff, pos, count);
1078 	inarg->flags = fuse_write_flags(iocb);
1079 	if (owner != NULL) {
1080 		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
1081 		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
1082 	}
1083 
1084 	if (ia->io->async)
1085 		return fuse_async_req_send(fm, ia, count);
1086 
1087 	err = fuse_simple_request(fm, &ia->ap.args);
1088 	if (!err && ia->write.out.size > count)
1089 		err = -EIO;
1090 
1091 	return err ?: ia->write.out.size;
1092 }
1093 
1094 bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
1095 {
1096 	struct fuse_conn *fc = get_fuse_conn(inode);
1097 	struct fuse_inode *fi = get_fuse_inode(inode);
1098 	bool ret = false;
1099 
1100 	spin_lock(&fi->lock);
1101 	fi->attr_version = atomic64_inc_return(&fc->attr_version);
1102 	if (written > 0 && pos > inode->i_size) {
1103 		i_size_write(inode, pos);
1104 		ret = true;
1105 	}
1106 	spin_unlock(&fi->lock);
1107 
1108 	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
1109 
1110 	return ret;
1111 }
1112 
1113 static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
1114 				     struct kiocb *iocb, struct inode *inode,
1115 				     loff_t pos, size_t count)
1116 {
1117 	struct fuse_args_pages *ap = &ia->ap;
1118 	struct file *file = iocb->ki_filp;
1119 	struct fuse_file *ff = file->private_data;
1120 	struct fuse_mount *fm = ff->fm;
1121 	unsigned int offset, i;
1122 	bool short_write;
1123 	int err;
1124 
1125 	for (i = 0; i < ap->num_pages; i++)
1126 		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);
1127 
1128 	fuse_write_args_fill(ia, ff, pos, count);
1129 	ia->write.in.flags = fuse_write_flags(iocb);
1130 	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
1131 		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;
1132 
1133 	err = fuse_simple_request(fm, &ap->args);
1134 	if (!err && ia->write.out.size > count)
1135 		err = -EIO;
1136 
1137 	short_write = ia->write.out.size < count;
1138 	offset = ap->descs[0].offset;
1139 	count = ia->write.out.size;
1140 	for (i = 0; i < ap->num_pages; i++) {
1141 		struct page *page = ap->pages[i];
1142 
1143 		if (err) {
1144 			ClearPageUptodate(page);
1145 		} else {
1146 			if (count >= PAGE_SIZE - offset)
1147 				count -= PAGE_SIZE - offset;
1148 			else {
1149 				if (short_write)
1150 					ClearPageUptodate(page);
1151 				count = 0;
1152 			}
1153 			offset = 0;
1154 		}
1155 		if (ia->write.page_locked && (i == ap->num_pages - 1))
1156 			unlock_page(page);
1157 		put_page(page);
1158 	}
1159 
1160 	return err;
1161 }
1162 
1163 static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
1164 				     struct address_space *mapping,
1165 				     struct iov_iter *ii, loff_t pos,
1166 				     unsigned int max_pages)
1167 {
1168 	struct fuse_args_pages *ap = &ia->ap;
1169 	struct fuse_conn *fc = get_fuse_conn(mapping->host);
1170 	unsigned offset = pos & (PAGE_SIZE - 1);
1171 	size_t count = 0;
1172 	int err;
1173 
1174 	ap->args.in_pages = true;
1175 	ap->descs[0].offset = offset;
1176 
1177 	do {
1178 		size_t tmp;
1179 		struct page *page;
1180 		pgoff_t index = pos >> PAGE_SHIFT;
1181 		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
1182 				     iov_iter_count(ii));
1183 
1184 		bytes = min_t(size_t, bytes, fc->max_write - count);
1185 
1186  again:
1187 		err = -EFAULT;
1188 		if (fault_in_iov_iter_readable(ii, bytes))
1189 			break;
1190 
1191 		err = -ENOMEM;
1192 		page = grab_cache_page_write_begin(mapping, index);
1193 		if (!page)
1194 			break;
1195 
1196 		if (mapping_writably_mapped(mapping))
1197 			flush_dcache_page(page);
1198 
1199 		tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
1200 		flush_dcache_page(page);
1201 
1202 		if (!tmp) {
1203 			unlock_page(page);
1204 			put_page(page);
1205 			goto again;
1206 		}
1207 
1208 		err = 0;
1209 		ap->pages[ap->num_pages] = page;
1210 		ap->descs[ap->num_pages].length = tmp;
1211 		ap->num_pages++;
1212 
1213 		count += tmp;
1214 		pos += tmp;
1215 		offset += tmp;
1216 		if (offset == PAGE_SIZE)
1217 			offset = 0;
1218 
1219 		/* If we copied full page, mark it uptodate */
1220 		if (tmp == PAGE_SIZE)
1221 			SetPageUptodate(page);
1222 
1223 		if (PageUptodate(page)) {
1224 			unlock_page(page);
1225 		} else {
1226 			ia->write.page_locked = true;
1227 			break;
1228 		}
1229 		if (!fc->big_writes)
1230 			break;
1231 	} while (iov_iter_count(ii) && count < fc->max_write &&
1232 		 ap->num_pages < max_pages && offset == 0);
1233 
1234 	return count > 0 ? count : err;
1235 }
1236 
1237 static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
1238 				     unsigned int max_pages)
1239 {
1240 	return min_t(unsigned int,
1241 		     ((pos + len - 1) >> PAGE_SHIFT) -
1242 		     (pos >> PAGE_SHIFT) + 1,
1243 		     max_pages);
1244 }
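
/*
 * Illustrative sketch (guarded out, not built): the page-count
 * arithmetic above.  A write of 8 bytes at pos 4094 straddles a 4K
 * page boundary, so two pages are needed even though len < PAGE_SIZE.
 */
#if 0
#include <assert.h>

#define DEMO_PAGE_SHIFT 12

static unsigned int demo_wr_pages(long long pos, unsigned long len,
				  unsigned int max_pages)
{
	unsigned int n = ((pos + len - 1) >> DEMO_PAGE_SHIFT) -
			 (pos >> DEMO_PAGE_SHIFT) + 1;

	return n < max_pages ? n : max_pages;
}

int main(void)
{
	assert(demo_wr_pages(4094, 8, 32) == 2);	/* straddles pages 0 and 1 */
	assert(demo_wr_pages(0, 4096, 32) == 1);	/* exactly one page */
	return 0;
}
#endif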
1245 
1246 static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
1247 {
1248 	struct address_space *mapping = iocb->ki_filp->f_mapping;
1249 	struct inode *inode = mapping->host;
1250 	struct fuse_conn *fc = get_fuse_conn(inode);
1251 	struct fuse_inode *fi = get_fuse_inode(inode);
1252 	loff_t pos = iocb->ki_pos;
1253 	int err = 0;
1254 	ssize_t res = 0;
1255 
1256 	if (inode->i_size < pos + iov_iter_count(ii))
1257 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1258 
1259 	do {
1260 		ssize_t count;
1261 		struct fuse_io_args ia = {};
1262 		struct fuse_args_pages *ap = &ia.ap;
1263 		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
1264 						      fc->max_pages);
1265 
1266 		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
1267 		if (!ap->pages) {
1268 			err = -ENOMEM;
1269 			break;
1270 		}
1271 
1272 		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
1273 		if (count <= 0) {
1274 			err = count;
1275 		} else {
1276 			err = fuse_send_write_pages(&ia, iocb, inode,
1277 						    pos, count);
1278 			if (!err) {
1279 				size_t num_written = ia.write.out.size;
1280 
1281 				res += num_written;
1282 				pos += num_written;
1283 
1284 				/* break out of the loop on short write */
1285 				if (num_written != count)
1286 					err = -EIO;
1287 			}
1288 		}
1289 		kfree(ap->pages);
1290 	} while (!err && iov_iter_count(ii));
1291 
1292 	fuse_write_update_attr(inode, pos, res);
1293 	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1294 
1295 	if (!res)
1296 		return err;
1297 	iocb->ki_pos += res;
1298 	return res;
1299 }
1300 
1301 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
1302 {
1303 	struct file *file = iocb->ki_filp;
1304 	struct address_space *mapping = file->f_mapping;
1305 	ssize_t written = 0;
1306 	struct inode *inode = mapping->host;
1307 	ssize_t err;
1308 	struct fuse_conn *fc = get_fuse_conn(inode);
1309 
1310 	if (fc->writeback_cache) {
1311 		/* Update size (EOF optimization) and mode (SUID clearing) */
1312 		err = fuse_update_attributes(mapping->host, file,
1313 					     STATX_SIZE | STATX_MODE);
1314 		if (err)
1315 			return err;
1316 
1317 		if (fc->handle_killpriv_v2 &&
1318 		    setattr_should_drop_suidgid(&nop_mnt_idmap,
1319 						file_inode(file))) {
1320 			goto writethrough;
1321 		}
1322 
1323 		return generic_file_write_iter(iocb, from);
1324 	}
1325 
1326 writethrough:
1327 	inode_lock(inode);
1328 
1329 	err = generic_write_checks(iocb, from);
1330 	if (err <= 0)
1331 		goto out;
1332 
1333 	err = file_remove_privs(file);
1334 	if (err)
1335 		goto out;
1336 
1337 	err = file_update_time(file);
1338 	if (err)
1339 		goto out;
1340 
1341 	if (iocb->ki_flags & IOCB_DIRECT) {
1342 		written = generic_file_direct_write(iocb, from);
1343 		if (written < 0 || !iov_iter_count(from))
1344 			goto out;
1345 		written = direct_write_fallback(iocb, from, written,
1346 				fuse_perform_write(iocb, from));
1347 	} else {
1348 		written = fuse_perform_write(iocb, from);
1349 	}
1350 out:
1351 	inode_unlock(inode);
1352 	if (written > 0)
1353 		written = generic_write_sync(iocb, written);
1354 
1355 	return written ? written : err;
1356 }
1357 
1358 static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
1359 {
1360 	return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
1361 }
1362 
1363 static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
1364 					size_t max_size)
1365 {
1366 	return min(iov_iter_single_seg_count(ii), max_size);
1367 }
1368 
1369 static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
1370 			       size_t *nbytesp, int write,
1371 			       unsigned int max_pages)
1372 {
1373 	size_t nbytes = 0;  /* # bytes already packed in req */
1374 	ssize_t ret = 0;
1375 
1376 	/* Special case for kernel I/O: can copy directly into the buffer */
1377 	if (iov_iter_is_kvec(ii)) {
1378 		unsigned long user_addr = fuse_get_user_addr(ii);
1379 		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
1380 
1381 		if (write)
1382 			ap->args.in_args[1].value = (void *) user_addr;
1383 		else
1384 			ap->args.out_args[0].value = (void *) user_addr;
1385 
1386 		iov_iter_advance(ii, frag_size);
1387 		*nbytesp = frag_size;
1388 		return 0;
1389 	}
1390 
1391 	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
1392 		unsigned npages;
1393 		size_t start;
1394 		ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
1395 					*nbytesp - nbytes,
1396 					max_pages - ap->num_pages,
1397 					&start);
1398 		if (ret < 0)
1399 			break;
1400 
1401 		nbytes += ret;
1402 
1403 		ret += start;
1404 		npages = DIV_ROUND_UP(ret, PAGE_SIZE);
1405 
1406 		ap->descs[ap->num_pages].offset = start;
1407 		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
1408 
1409 		ap->num_pages += npages;
1410 		ap->descs[ap->num_pages - 1].length -=
1411 			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
1412 	}
1413 
1414 	ap->args.user_pages = true;
1415 	if (write)
1416 		ap->args.in_pages = true;
1417 	else
1418 		ap->args.out_pages = true;
1419 
1420 	*nbytesp = nbytes;
1421 
1422 	return ret < 0 ? ret : 0;
1423 }
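
/*
 * Illustrative sketch (guarded out, not built): the tail-trim
 * arithmetic above.  iov_iter_get_pages2() returns the byte count plus
 * the offset 'start' into the first page; the span start+bytes decides
 * how many pages are pinned and how much of the last one is used.
 */
#if 0
#include <assert.h>

#define DEMO_PAGE_SIZE 4096u

int main(void)
{
	unsigned int start = 100, bytes = 5000;
	unsigned int span = start + bytes;
	unsigned int npages = (span + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
	unsigned int last_len = DEMO_PAGE_SIZE -
		((DEMO_PAGE_SIZE - span) & (DEMO_PAGE_SIZE - 1));

	assert(npages == 2);			/* bytes 100..5099 touch two pages */
	assert(last_len == 5100 - 4096);	/* 1004 bytes of the second page */
	return 0;
}
#endif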
1424 
1425 ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1426 		       loff_t *ppos, int flags)
1427 {
1428 	int write = flags & FUSE_DIO_WRITE;
1429 	int cuse = flags & FUSE_DIO_CUSE;
1430 	struct file *file = io->iocb->ki_filp;
1431 	struct address_space *mapping = file->f_mapping;
1432 	struct inode *inode = mapping->host;
1433 	struct fuse_file *ff = file->private_data;
1434 	struct fuse_conn *fc = ff->fm->fc;
1435 	size_t nmax = write ? fc->max_write : fc->max_read;
1436 	loff_t pos = *ppos;
1437 	size_t count = iov_iter_count(iter);
1438 	pgoff_t idx_from = pos >> PAGE_SHIFT;
1439 	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
1440 	ssize_t res = 0;
1441 	int err = 0;
1442 	struct fuse_io_args *ia;
1443 	unsigned int max_pages;
1444 	bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;
1445 
1446 	max_pages = iov_iter_npages(iter, fc->max_pages);
1447 	ia = fuse_io_alloc(io, max_pages);
1448 	if (!ia)
1449 		return -ENOMEM;
1450 
1451 	if (fopen_direct_io && fc->direct_io_allow_mmap) {
1452 		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
1453 		if (res) {
1454 			fuse_io_free(ia);
1455 			return res;
1456 		}
1457 	}
1458 	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
1459 		if (!write)
1460 			inode_lock(inode);
1461 		fuse_sync_writes(inode);
1462 		if (!write)
1463 			inode_unlock(inode);
1464 	}
1465 
1466 	if (fopen_direct_io && write) {
1467 		res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
1468 		if (res) {
1469 			fuse_io_free(ia);
1470 			return res;
1471 		}
1472 	}
1473 
1474 	io->should_dirty = !write && user_backed_iter(iter);
1475 	while (count) {
1476 		ssize_t nres;
1477 		fl_owner_t owner = current->files;
1478 		size_t nbytes = min(count, nmax);
1479 
1480 		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
1481 					  max_pages);
1482 		if (err && !nbytes)
1483 			break;
1484 
1485 		if (write) {
1486 			if (!capable(CAP_FSETID))
1487 				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;
1488 
1489 			nres = fuse_send_write(ia, pos, nbytes, owner);
1490 		} else {
1491 			nres = fuse_send_read(ia, pos, nbytes, owner);
1492 		}
1493 
1494 		if (!io->async || nres < 0) {
1495 			fuse_release_user_pages(&ia->ap, io->should_dirty);
1496 			fuse_io_free(ia);
1497 		}
1498 		ia = NULL;
1499 		if (nres < 0) {
1500 			iov_iter_revert(iter, nbytes);
1501 			err = nres;
1502 			break;
1503 		}
1504 		WARN_ON(nres > nbytes);
1505 
1506 		count -= nres;
1507 		res += nres;
1508 		pos += nres;
1509 		if (nres != nbytes) {
1510 			iov_iter_revert(iter, nbytes - nres);
1511 			break;
1512 		}
1513 		if (count) {
1514 			max_pages = iov_iter_npages(iter, fc->max_pages);
1515 			ia = fuse_io_alloc(io, max_pages);
1516 			if (!ia)
1517 				break;
1518 		}
1519 	}
1520 	if (ia)
1521 		fuse_io_free(ia);
1522 	if (res > 0)
1523 		*ppos = pos;
1524 
1525 	return res > 0 ? res : err;
1526 }
1527 EXPORT_SYMBOL_GPL(fuse_direct_io);
1528 
1529 static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
1530 				  struct iov_iter *iter,
1531 				  loff_t *ppos)
1532 {
1533 	ssize_t res;
1534 	struct inode *inode = file_inode(io->iocb->ki_filp);
1535 
1536 	res = fuse_direct_io(io, iter, ppos, 0);
1537 
1538 	fuse_invalidate_atime(inode);
1539 
1540 	return res;
1541 }
1542 
1543 static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
1544 
1545 static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
1546 {
1547 	ssize_t res;
1548 
1549 	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
1550 		res = fuse_direct_IO(iocb, to);
1551 	} else {
1552 		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
1553 
1554 		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
1555 	}
1556 
1557 	return res;
1558 }
1559 
1560 static bool fuse_direct_write_extending_i_size(struct kiocb *iocb,
1561 					       struct iov_iter *iter)
1562 {
1563 	struct inode *inode = file_inode(iocb->ki_filp);
1564 
1565 	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
1566 }
1567 
1568 static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
1569 {
1570 	struct inode *inode = file_inode(iocb->ki_filp);
1571 	struct file *file = iocb->ki_filp;
1572 	struct fuse_file *ff = file->private_data;
1573 	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
1574 	ssize_t res;
1575 	bool exclusive_lock =
1576 		!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) ||
1577 		get_fuse_conn(inode)->direct_io_allow_mmap ||
1578 		iocb->ki_flags & IOCB_APPEND ||
1579 		fuse_direct_write_extending_i_size(iocb, from);
1580 
1581 	/*
1582 	 * Take the exclusive lock if:
1583 	 * - Parallel direct writes are disabled (a userspace decision).
1584 	 * - Parallel direct writes are enabled and i_size is being extended.
1585 	 * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP).
1586 	 *   This might not be needed at all, but needs further investigation.
1587 	 */
1588 	if (exclusive_lock)
1589 		inode_lock(inode);
1590 	else {
1591 		inode_lock_shared(inode);
1592 
1593 		/* A race with truncate might have come up, as the decision for
1594 		 * the lock type was made without holding the lock; check again.
1595 		 */
1596 		if (fuse_direct_write_extending_i_size(iocb, from)) {
1597 			inode_unlock_shared(inode);
1598 			inode_lock(inode);
1599 			exclusive_lock = true;
1600 		}
1601 	}
1602 
1603 	res = generic_write_checks(iocb, from);
1604 	if (res > 0) {
1605 		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
1606 			res = fuse_direct_IO(iocb, from);
1607 		} else {
1608 			res = fuse_direct_io(&io, from, &iocb->ki_pos,
1609 					     FUSE_DIO_WRITE);
1610 			fuse_write_update_attr(inode, iocb->ki_pos, res);
1611 		}
1612 	}
1613 	if (exclusive_lock)
1614 		inode_unlock(inode);
1615 	else
1616 		inode_unlock_shared(inode);
1617 
1618 	return res;
1619 }
1620 
1621 static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1622 {
1623 	struct file *file = iocb->ki_filp;
1624 	struct fuse_file *ff = file->private_data;
1625 	struct inode *inode = file_inode(file);
1626 
1627 	if (fuse_is_bad(inode))
1628 		return -EIO;
1629 
1630 	if (FUSE_IS_DAX(inode))
1631 		return fuse_dax_read_iter(iocb, to);
1632 
1633 	if (!(ff->open_flags & FOPEN_DIRECT_IO))
1634 		return fuse_cache_read_iter(iocb, to);
1635 	else
1636 		return fuse_direct_read_iter(iocb, to);
1637 }
1638 
1639 static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1640 {
1641 	struct file *file = iocb->ki_filp;
1642 	struct fuse_file *ff = file->private_data;
1643 	struct inode *inode = file_inode(file);
1644 
1645 	if (fuse_is_bad(inode))
1646 		return -EIO;
1647 
1648 	if (FUSE_IS_DAX(inode))
1649 		return fuse_dax_write_iter(iocb, from);
1650 
1651 	if (!(ff->open_flags & FOPEN_DIRECT_IO))
1652 		return fuse_cache_write_iter(iocb, from);
1653 	else
1654 		return fuse_direct_write_iter(iocb, from);
1655 }
1656 
1657 static void fuse_writepage_free(struct fuse_writepage_args *wpa)
1658 {
1659 	struct fuse_args_pages *ap = &wpa->ia.ap;
1660 	int i;
1661 
1662 	if (wpa->bucket)
1663 		fuse_sync_bucket_dec(wpa->bucket);
1664 
1665 	for (i = 0; i < ap->num_pages; i++)
1666 		__free_page(ap->pages[i]);
1667 
1668 	if (wpa->ia.ff)
1669 		fuse_file_put(wpa->ia.ff, false, false);
1670 
1671 	kfree(ap->pages);
1672 	kfree(wpa);
1673 }
1674 
1675 static void fuse_writepage_finish(struct fuse_mount *fm,
1676 				  struct fuse_writepage_args *wpa)
1677 {
1678 	struct fuse_args_pages *ap = &wpa->ia.ap;
1679 	struct inode *inode = wpa->inode;
1680 	struct fuse_inode *fi = get_fuse_inode(inode);
1681 	struct backing_dev_info *bdi = inode_to_bdi(inode);
1682 	int i;
1683 
1684 	for (i = 0; i < ap->num_pages; i++) {
1685 		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
1686 		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
1687 		wb_writeout_inc(&bdi->wb);
1688 	}
1689 	wake_up(&fi->page_waitq);
1690 }
1691 
1692 /* Called under fi->lock, may release and reacquire it */
1693 static void fuse_send_writepage(struct fuse_mount *fm,
1694 				struct fuse_writepage_args *wpa, loff_t size)
1695 __releases(fi->lock)
1696 __acquires(fi->lock)
1697 {
1698 	struct fuse_writepage_args *aux, *next;
1699 	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
1700 	struct fuse_write_in *inarg = &wpa->ia.write.in;
1701 	struct fuse_args *args = &wpa->ia.ap.args;
1702 	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
1703 	int err;
1704 
1705 	fi->writectr++;
1706 	if (inarg->offset + data_size <= size) {
1707 		inarg->size = data_size;
1708 	} else if (inarg->offset < size) {
1709 		inarg->size = size - inarg->offset;
1710 	} else {
1711 		/* Got truncated off completely */
1712 		goto out_free;
1713 	}
1714 
1715 	args->in_args[1].size = inarg->size;
1716 	args->force = true;
1717 	args->nocreds = true;
1718 
1719 	err = fuse_simple_background(fm, args, GFP_ATOMIC);
1720 	if (err == -ENOMEM) {
1721 		spin_unlock(&fi->lock);
1722 		err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
1723 		spin_lock(&fi->lock);
1724 	}
1725 
1726 	/* Fails on broken connection only */
1727 	if (unlikely(err))
1728 		goto out_free;
1729 
1730 	return;
1731 
1732  out_free:
1733 	fi->writectr--;
1734 	rb_erase(&wpa->writepages_entry, &fi->writepages);
1735 	fuse_writepage_finish(fm, wpa);
1736 	spin_unlock(&fi->lock);
1737 
1738 	/* After fuse_writepage_finish() the aux request list is private */
1739 	for (aux = wpa->next; aux; aux = next) {
1740 		next = aux->next;
1741 		aux->next = NULL;
1742 		fuse_writepage_free(aux);
1743 	}
1744 
1745 	fuse_writepage_free(wpa);
1746 	spin_lock(&fi->lock);
1747 }
1748 
1749 /*
1750  * If fi->writectr is positive (no truncate or fsync going on) send
1751  * all queued writepage requests.
1752  *
1753  * Called with fi->lock held.
1754  */
1755 void fuse_flush_writepages(struct inode *inode)
1756 __releases(fi->lock)
1757 __acquires(fi->lock)
1758 {
1759 	struct fuse_mount *fm = get_fuse_mount(inode);
1760 	struct fuse_inode *fi = get_fuse_inode(inode);
1761 	loff_t crop = i_size_read(inode);
1762 	struct fuse_writepage_args *wpa;
1763 
1764 	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
1765 		wpa = list_entry(fi->queued_writes.next,
1766 				 struct fuse_writepage_args, queue_entry);
1767 		list_del_init(&wpa->queue_entry);
1768 		fuse_send_writepage(fm, wpa, crop);
1769 	}
1770 }
1771 
1772 static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
1773 						struct fuse_writepage_args *wpa)
1774 {
1775 	pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
1776 	pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
1777 	struct rb_node **p = &root->rb_node;
1778 	struct rb_node  *parent = NULL;
1779 
1780 	WARN_ON(!wpa->ia.ap.num_pages);
1781 	while (*p) {
1782 		struct fuse_writepage_args *curr;
1783 		pgoff_t curr_index;
1784 
1785 		parent = *p;
1786 		curr = rb_entry(parent, struct fuse_writepage_args,
1787 				writepages_entry);
1788 		WARN_ON(curr->inode != wpa->inode);
1789 		curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
1790 
1791 		if (idx_from >= curr_index + curr->ia.ap.num_pages)
1792 			p = &(*p)->rb_right;
1793 		else if (idx_to < curr_index)
1794 			p = &(*p)->rb_left;
1795 		else
1796 			return curr;
1797 	}
1798 
1799 	rb_link_node(&wpa->writepages_entry, parent, p);
1800 	rb_insert_color(&wpa->writepages_entry, root);
1801 	return NULL;
1802 }
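
/*
 * Illustrative sketch (guarded out, not built): the tree above is keyed
 * by inclusive page-index ranges, and both this insert helper and
 * fuse_find_writeback() navigate with the same overlap test, which in
 * isolation is just:
 */
#if 0
#include <stdbool.h>

/* [a_from, a_to] and [b_from, b_to] are inclusive page-index ranges */
static bool demo_ranges_overlap(unsigned long a_from, unsigned long a_to,
				unsigned long b_from, unsigned long b_to)
{
	return a_from <= b_to && b_from <= a_to;
}
#endif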
1803 
1804 static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
1805 {
1806 	WARN_ON(fuse_insert_writeback(root, wpa));
1807 }
1808 
1809 static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
1810 			       int error)
1811 {
1812 	struct fuse_writepage_args *wpa =
1813 		container_of(args, typeof(*wpa), ia.ap.args);
1814 	struct inode *inode = wpa->inode;
1815 	struct fuse_inode *fi = get_fuse_inode(inode);
1816 	struct fuse_conn *fc = get_fuse_conn(inode);
1817 
1818 	mapping_set_error(inode->i_mapping, error);
1819 	/*
1820 	 * A writeback finished and this might have updated mtime/ctime on
1821 	 * server making local mtime/ctime stale.  Hence invalidate attrs.
1822 	 * Do this only if writeback_cache is not enabled.  If writeback_cache
1823 	 * is enabled, we trust local ctime/mtime.
1824 	 */
1825 	if (!fc->writeback_cache)
1826 		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
1827 	spin_lock(&fi->lock);
1828 	rb_erase(&wpa->writepages_entry, &fi->writepages);
1829 	while (wpa->next) {
1830 		struct fuse_mount *fm = get_fuse_mount(inode);
1831 		struct fuse_write_in *inarg = &wpa->ia.write.in;
1832 		struct fuse_writepage_args *next = wpa->next;
1833 
1834 		wpa->next = next->next;
1835 		next->next = NULL;
1836 		next->ia.ff = fuse_file_get(wpa->ia.ff);
1837 		tree_insert(&fi->writepages, next);
1838 
1839 		/*
1840 		 * Skip fuse_flush_writepages() to make it easy to crop requests
1841 		 * based on primary request size.
1842 		 *
1843 		 * 1st case (trivial): there are no concurrent activities using
1844 		 * fuse_set/release_nowrite.  Then we're on safe side because
1845 		 * fuse_flush_writepages() would call fuse_send_writepage()
1846 		 * anyway.
1847 		 *
1848 		 * 2nd case: someone called fuse_set_nowrite and it is waiting
1849 		 * now for completion of all in-flight requests.  This happens
1850 		 * rarely and no more than once per page, so this should be
1851 		 * okay.
1852 		 *
1853 		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
1854 		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
1855 		 * that fuse_set_nowrite returned implies that all in-flight
1856 		 * requests were completed along with all of their secondary
1857 		 * requests.  Further primary requests are blocked by negative
1858 		 * writectr.  Hence there cannot be any in-flight requests and
1859 		 * no invocations of fuse_writepage_end() while we're in
1860 		 * fuse_set_nowrite..fuse_release_nowrite section.
1861 		 */
1862 		fuse_send_writepage(fm, next, inarg->offset + inarg->size);
1863 	}
1864 	fi->writectr--;
1865 	fuse_writepage_finish(fm, wpa);
1866 	spin_unlock(&fi->lock);
1867 	fuse_writepage_free(wpa);
1868 }
1869 
1870 static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
1871 {
1872 	struct fuse_file *ff;
1873 
1874 	spin_lock(&fi->lock);
1875 	ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
1876 				      write_entry);
1877 	if (ff)
1878 		fuse_file_get(ff);
1879 	spin_unlock(&fi->lock);
1880 
1881 	return ff;
1882 }
1883 
1884 static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
1885 {
1886 	struct fuse_file *ff = __fuse_write_file_get(fi);
1887 	WARN_ON(!ff);
1888 	return ff;
1889 }
1890 
1891 int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
1892 {
1893 	struct fuse_inode *fi = get_fuse_inode(inode);
1894 	struct fuse_file *ff;
1895 	int err;
1896 
1897 	/*
1898 	 * Inode is always written before the last reference is dropped and
1899 	 * hence this should not be reached from reclaim.
1900 	 *
1901 	 * Writing back the inode from reclaim can deadlock if the request
1902 	 * processing itself needs an allocation.  Allocations triggering
1903 	 * reclaim while serving a request can't be prevented, because it can
1904 	 * involve any number of unrelated userspace processes.
1905 	 */
1906 	WARN_ON(wbc->for_reclaim);
1907 
1908 	ff = __fuse_write_file_get(fi);
1909 	err = fuse_flush_times(inode, ff);
1910 	if (ff)
1911 		fuse_file_put(ff, false, false);
1912 
1913 	return err;
1914 }
1915 
1916 static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
1917 {
1918 	struct fuse_writepage_args *wpa;
1919 	struct fuse_args_pages *ap;
1920 
1921 	wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
1922 	if (wpa) {
1923 		ap = &wpa->ia.ap;
1924 		ap->num_pages = 0;
1925 		ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
1926 		if (!ap->pages) {
1927 			kfree(wpa);
1928 			wpa = NULL;
1929 		}
1930 	}
1931 	return wpa;
1932 
1933 }
1934 
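/*
 * Account this writepage request against the connection's current syncfs
 * bucket.  The retry loop below guards against grabbing a reference on a
 * bucket whose count has already dropped to zero; the syncfs side is
 * expected to install a fresh bucket and wait for the old count to
 * drain, so writes started before a syncfs are not missed by it.
 */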
1935 static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
1936 					 struct fuse_writepage_args *wpa)
1937 {
1938 	if (!fc->sync_fs)
1939 		return;
1940 
1941 	rcu_read_lock();
1942 	/* Prevent resurrection of dead bucket in unlikely race with syncfs */
1943 	do {
1944 		wpa->bucket = rcu_dereference(fc->curr_bucket);
1945 	} while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
1946 	rcu_read_unlock();
1947 }
1948 
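/*
 * Write out a single locked page.  The contents are copied to a freshly
 * allocated temporary page, so the original page can leave writeback
 * (end_page_writeback() below) and be redirtied or reclaimed while the
 * userspace server is still processing the WRITE request.
 */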
1949 static int fuse_writepage_locked(struct page *page)
1950 {
1951 	struct address_space *mapping = page->mapping;
1952 	struct inode *inode = mapping->host;
1953 	struct fuse_conn *fc = get_fuse_conn(inode);
1954 	struct fuse_inode *fi = get_fuse_inode(inode);
1955 	struct fuse_writepage_args *wpa;
1956 	struct fuse_args_pages *ap;
1957 	struct page *tmp_page;
1958 	int error = -ENOMEM;
1959 
1960 	set_page_writeback(page);
1961 
1962 	wpa = fuse_writepage_args_alloc();
1963 	if (!wpa)
1964 		goto err;
1965 	ap = &wpa->ia.ap;
1966 
1967 	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1968 	if (!tmp_page)
1969 		goto err_free;
1970 
1971 	error = -EIO;
1972 	wpa->ia.ff = fuse_write_file_get(fi);
1973 	if (!wpa->ia.ff)
1974 		goto err_nofile;
1975 
1976 	fuse_writepage_add_to_bucket(fc, wpa);
1977 	fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
1978 
1979 	copy_highpage(tmp_page, page);
1980 	wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
1981 	wpa->next = NULL;
1982 	ap->args.in_pages = true;
1983 	ap->num_pages = 1;
1984 	ap->pages[0] = tmp_page;
1985 	ap->descs[0].offset = 0;
1986 	ap->descs[0].length = PAGE_SIZE;
1987 	ap->args.end = fuse_writepage_end;
1988 	wpa->inode = inode;
1989 
1990 	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
1991 	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
1992 
1993 	spin_lock(&fi->lock);
1994 	tree_insert(&fi->writepages, wpa);
1995 	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
1996 	fuse_flush_writepages(inode);
1997 	spin_unlock(&fi->lock);
1998 
1999 	end_page_writeback(page);
2000 
2001 	return 0;
2002 
2003 err_nofile:
2004 	__free_page(tmp_page);
2005 err_free:
2006 	kfree(wpa);
2007 err:
2008 	mapping_set_error(page->mapping, error);
2009 	end_page_writeback(page);
2010 	return error;
2011 }
2012 
2013 static int fuse_writepage(struct page *page, struct writeback_control *wbc)
2014 {
2015 	struct fuse_conn *fc = get_fuse_conn(page->mapping->host);
2016 	int err;
2017 
2018 	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
2019 		/*
2020 		 * ->writepages() should be called for sync() and friends.  We
2021 		 * should only get here on direct reclaim and then we are
2022 		 * allowed to skip a page which is already in flight.
2023 		 */
2024 		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
2025 
2026 		redirty_page_for_writepage(wbc, page);
2027 		unlock_page(page);
2028 		return 0;
2029 	}
2030 
2031 	if (wbc->sync_mode == WB_SYNC_NONE &&
2032 	    fc->num_background >= fc->congestion_threshold)
2033 		return AOP_WRITEPAGE_ACTIVATE;
2034 
2035 	err = fuse_writepage_locked(page);
2036 	unlock_page(page);
2037 
2038 	return err;
2039 }
2040 
2041 struct fuse_fill_wb_data {
2042 	struct fuse_writepage_args *wpa;
2043 	struct fuse_file *ff;
2044 	struct inode *inode;
2045 	struct page **orig_pages;
2046 	unsigned int max_pages;
2047 };
2048 
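/*
 * Grow the pages array of the request under construction: double the
 * capacity (but at least FUSE_DEFAULT_MAX_PAGES_PER_REQ), capped at
 * fc->max_pages.  On allocation failure the caller keeps the old array;
 * fuse_writepage_need_send() then reports the batch as full and it gets
 * sent as-is.
 */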
2049 static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
2050 {
2051 	struct fuse_args_pages *ap = &data->wpa->ia.ap;
2052 	struct fuse_conn *fc = get_fuse_conn(data->inode);
2053 	struct page **pages;
2054 	struct fuse_page_desc *descs;
2055 	unsigned int npages = min_t(unsigned int,
2056 				    max_t(unsigned int, data->max_pages * 2,
2057 					  FUSE_DEFAULT_MAX_PAGES_PER_REQ),
2058 				    fc->max_pages);
2059 	WARN_ON(npages <= data->max_pages);
2060 
2061 	pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
2062 	if (!pages)
2063 		return false;
2064 
2065 	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
2066 	memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
2067 	kfree(ap->pages);
2068 	ap->pages = pages;
2069 	ap->descs = descs;
2070 	data->max_pages = npages;
2071 
2072 	return true;
2073 }
2074 
2075 static void fuse_writepages_send(struct fuse_fill_wb_data *data)
2076 {
2077 	struct fuse_writepage_args *wpa = data->wpa;
2078 	struct inode *inode = data->inode;
2079 	struct fuse_inode *fi = get_fuse_inode(inode);
2080 	int num_pages = wpa->ia.ap.num_pages;
2081 	int i;
2082 
2083 	wpa->ia.ff = fuse_file_get(data->ff);
2084 	spin_lock(&fi->lock);
2085 	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
2086 	fuse_flush_writepages(inode);
2087 	spin_unlock(&fi->lock);
2088 
2089 	for (i = 0; i < num_pages; i++)
2090 		end_page_writeback(data->orig_pages[i]);
2091 }
2092 
2093 /*
2094  * Check under fi->lock if the page is under writeback, and insert it into
2095  * the rb_tree if not.  Otherwise iterate the auxiliary write requests to
2096  * see if one was already added for the page at this offset.  If there is
2097  * none, insert this new request into the auxiliary list; otherwise reuse
2098  * the existing one by swapping in the new temporary page.
2099  */
2100 static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
2101 			       struct page *page)
2102 {
2103 	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
2104 	struct fuse_writepage_args *tmp;
2105 	struct fuse_writepage_args *old_wpa;
2106 	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
2107 
2108 	WARN_ON(new_ap->num_pages != 0);
2109 	new_ap->num_pages = 1;
2110 
2111 	spin_lock(&fi->lock);
2112 	old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
2113 	if (!old_wpa) {
2114 		spin_unlock(&fi->lock);
2115 		return true;
2116 	}
2117 
2118 	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
2119 		pgoff_t curr_index;
2120 
2121 		WARN_ON(tmp->inode != new_wpa->inode);
2122 		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
2123 		if (curr_index == page->index) {
2124 			WARN_ON(tmp->ia.ap.num_pages != 1);
2125 			swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
2126 			break;
2127 		}
2128 	}
2129 
2130 	if (!tmp) {
2131 		new_wpa->next = old_wpa->next;
2132 		old_wpa->next = new_wpa;
2133 	}
2134 
2135 	spin_unlock(&fi->lock);
2136 
2137 	if (tmp) {
2138 		struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
2139 
2140 		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
2141 		dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
2142 		wb_writeout_inc(&bdi->wb);
2143 		fuse_writepage_free(new_wpa);
2144 	}
2145 
2146 	return false;
2147 }
2148 
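/*
 * Decide whether the request under construction must be sent before this
 * page can be added: the page is already under writeback, the request is
 * full (max_pages or max_write reached), the page is not contiguous with
 * the previous one, or the pages array cannot be grown any further.
 */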
2149 static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
2150 				     struct fuse_args_pages *ap,
2151 				     struct fuse_fill_wb_data *data)
2152 {
2153 	WARN_ON(!ap->num_pages);
2154 
2155 	/*
2156 	 * Being under writeback is unlikely but possible.  For example, a
2157 	 * direct read into an mmapped fuse file will set the page dirty twice:
2158 	 * once when the pages are faulted in with get_user_pages(), and again
2159 	 * after the read has completed.
2160 	 */
2161 	if (fuse_page_is_writeback(data->inode, page->index))
2162 		return true;
2163 
2164 	/* Reached max pages */
2165 	if (ap->num_pages == fc->max_pages)
2166 		return true;
2167 
2168 	/* Reached max write bytes */
2169 	if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
2170 		return true;
2171 
2172 	/* Discontinuity */
2173 	if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
2174 		return true;
2175 
2176 	/* Need to grow the pages array?  If so, did the expansion fail? */
2177 	if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
2178 		return true;
2179 
2180 	return false;
2181 }
2182 
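/*
 * Callback for write_cache_pages(): copies each dirty folio into a
 * temporary page and appends it to the WRITE request being built up in
 * data->wpa.  If the pending request cannot take this page (see
 * fuse_writepage_need_send()), it is sent first and a new request is
 * started.
 */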
2183 static int fuse_writepages_fill(struct folio *folio,
2184 		struct writeback_control *wbc, void *_data)
2185 {
2186 	struct fuse_fill_wb_data *data = _data;
2187 	struct fuse_writepage_args *wpa = data->wpa;
2188 	struct fuse_args_pages *ap = &wpa->ia.ap;
2189 	struct inode *inode = data->inode;
2190 	struct fuse_inode *fi = get_fuse_inode(inode);
2191 	struct fuse_conn *fc = get_fuse_conn(inode);
2192 	struct page *tmp_page;
2193 	int err;
2194 
2195 	if (!data->ff) {
2196 		err = -EIO;
2197 		data->ff = fuse_write_file_get(fi);
2198 		if (!data->ff)
2199 			goto out_unlock;
2200 	}
2201 
2202 	if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
2203 		fuse_writepages_send(data);
2204 		data->wpa = NULL;
2205 	}
2206 
2207 	err = -ENOMEM;
2208 	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2209 	if (!tmp_page)
2210 		goto out_unlock;
2211 
2212 	/*
2213 	 * The page must not be redirtied until the writeout is completed
2214 	 * (i.e. userspace has sent a reply to the write request).  Otherwise
2215 	 * there could be more than one temporary page instance for each real
2216 	 * page.
2217 	 *
2218 	 * This is ensured by holding the page lock in page_mkwrite() while
2219 	 * checking fuse_page_is_writeback().  We have held the page lock
2220 	 * since clear_page_dirty_for_io() and keep it held until we add the
2221 	 * request to the fi->writepages list and increment ap->num_pages.
2222 	 * After this fuse_page_is_writeback() will indicate that the page is
2223 	 * under writeback, so we can release the page lock.
2224 	 */
2225 	if (data->wpa == NULL) {
2226 		err = -ENOMEM;
2227 		wpa = fuse_writepage_args_alloc();
2228 		if (!wpa) {
2229 			__free_page(tmp_page);
2230 			goto out_unlock;
2231 		}
2232 		fuse_writepage_add_to_bucket(fc, wpa);
2233 
2234 		data->max_pages = 1;
2235 
2236 		ap = &wpa->ia.ap;
2237 		fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
2238 		wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2239 		wpa->next = NULL;
2240 		ap->args.in_pages = true;
2241 		ap->args.end = fuse_writepage_end;
2242 		ap->num_pages = 0;
2243 		wpa->inode = inode;
2244 	}
2245 	folio_start_writeback(folio);
2246 
2247 	copy_highpage(tmp_page, &folio->page);
2248 	ap->pages[ap->num_pages] = tmp_page;
2249 	ap->descs[ap->num_pages].offset = 0;
2250 	ap->descs[ap->num_pages].length = PAGE_SIZE;
2251 	data->orig_pages[ap->num_pages] = &folio->page;
2252 
2253 	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
2254 	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
2255 
2256 	err = 0;
2257 	if (data->wpa) {
2258 		/*
2259 		 * Protected by fi->lock against concurrent access by
2260 		 * fuse_page_is_writeback().
2261 		 */
2262 		spin_lock(&fi->lock);
2263 		ap->num_pages++;
2264 		spin_unlock(&fi->lock);
2265 	} else if (fuse_writepage_add(wpa, &folio->page)) {
2266 		data->wpa = wpa;
2267 	} else {
2268 		folio_end_writeback(folio);
2269 	}
2270 out_unlock:
2271 	folio_unlock(folio);
2272 
2273 	return err;
2274 }
2275 
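/*
 * ->writepages() entry point: batch dirty pages into WRITE requests via
 * write_cache_pages()/fuse_writepages_fill(), then send any partially
 * filled request that remains.
 */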
2276 static int fuse_writepages(struct address_space *mapping,
2277 			   struct writeback_control *wbc)
2278 {
2279 	struct inode *inode = mapping->host;
2280 	struct fuse_conn *fc = get_fuse_conn(inode);
2281 	struct fuse_fill_wb_data data;
2282 	int err;
2283 
2284 	err = -EIO;
2285 	if (fuse_is_bad(inode))
2286 		goto out;
2287 
2288 	if (wbc->sync_mode == WB_SYNC_NONE &&
2289 	    fc->num_background >= fc->congestion_threshold)
2290 		return 0;
2291 
2292 	data.inode = inode;
2293 	data.wpa = NULL;
2294 	data.ff = NULL;
2295 
2296 	err = -ENOMEM;
2297 	data.orig_pages = kcalloc(fc->max_pages,
2298 				  sizeof(struct page *),
2299 				  GFP_NOFS);
2300 	if (!data.orig_pages)
2301 		goto out;
2302 
2303 	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
2304 	if (data.wpa) {
2305 		WARN_ON(!data.wpa->ia.ap.num_pages);
2306 		fuse_writepages_send(&data);
2307 	}
2308 	if (data.ff)
2309 		fuse_file_put(data.ff, false, false);
2310 
2311 	kfree(data.orig_pages);
2312 out:
2313 	return err;
2314 }
2315 
2316 /*
2317  * It would be worthwhile to reserve space on disk for the write, but how
2318  * to implement that without killing performance needs more thought.
2319  */
2320 static int fuse_write_begin(struct file *file, struct address_space *mapping,
2321 		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
2322 {
2323 	pgoff_t index = pos >> PAGE_SHIFT;
2324 	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
2325 	struct page *page;
2326 	loff_t fsize;
2327 	int err = -ENOMEM;
2328 
2329 	WARN_ON(!fc->writeback_cache);
2330 
2331 	page = grab_cache_page_write_begin(mapping, index);
2332 	if (!page)
2333 		goto error;
2334 
2335 	fuse_wait_on_page_writeback(mapping->host, page->index);
2336 
2337 	if (PageUptodate(page) || len == PAGE_SIZE)
2338 		goto success;
2339 	/*
2340 	 * Check if the start of this page comes after the end of the file, in
2341 	 * which case the readpage can be optimized away.
2342 	 */
2343 	fsize = i_size_read(mapping->host);
2344 	if (fsize <= (pos & PAGE_MASK)) {
2345 		size_t off = pos & ~PAGE_MASK;
2346 		if (off)
2347 			zero_user_segment(page, 0, off);
2348 		goto success;
2349 	}
2350 	err = fuse_do_readpage(file, page);
2351 	if (err)
2352 		goto cleanup;
2353 success:
2354 	*pagep = page;
2355 	return 0;
2356 
2357 cleanup:
2358 	unlock_page(page);
2359 	put_page(page);
2360 error:
2361 	return err;
2362 }
2363 
2364 static int fuse_write_end(struct file *file, struct address_space *mapping,
2365 		loff_t pos, unsigned len, unsigned copied,
2366 		struct page *page, void *fsdata)
2367 {
2368 	struct inode *inode = page->mapping->host;
2369 
2370 	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
2371 	if (!copied)
2372 		goto unlock;
2373 
2374 	pos += copied;
2375 	if (!PageUptodate(page)) {
2376 		/* Zero any unwritten bytes at the end of the page */
2377 		size_t endoff = pos & ~PAGE_MASK;
2378 		if (endoff)
2379 			zero_user_segment(page, endoff, PAGE_SIZE);
2380 		SetPageUptodate(page);
2381 	}
2382 
2383 	if (pos > inode->i_size)
2384 		i_size_write(inode, pos);
2385 
2386 	set_page_dirty(page);
2387 
2388 unlock:
2389 	unlock_page(page);
2390 	put_page(page);
2391 
2392 	return copied;
2393 }
2394 
2395 static int fuse_launder_folio(struct folio *folio)
2396 {
2397 	int err = 0;
2398 	if (folio_clear_dirty_for_io(folio)) {
2399 		struct inode *inode = folio->mapping->host;
2400 
2401 		/* Serialize with pending writeback for the same page */
2402 		fuse_wait_on_page_writeback(inode, folio->index);
2403 		err = fuse_writepage_locked(&folio->page);
2404 		if (!err)
2405 			fuse_wait_on_page_writeback(inode, folio->index);
2406 	}
2407 	return err;
2408 }
2409 
2410 /*
2411  * Write back dirty data/metadata now (there may not be any suitable
2412  * open files later for data)
2413  */
2414 static void fuse_vma_close(struct vm_area_struct *vma)
2415 {
2416 	int err;
2417 
2418 	err = write_inode_now(vma->vm_file->f_mapping->host, 1);
2419 	mapping_set_error(vma->vm_file->f_mapping, err);
2420 }
2421 
2422 /*
2423  * Wait for writeback against this page to complete before allowing it
2424  * to be marked dirty again, and hence written back again, possibly
2425  * before the previous writepage completed.
2426  *
2427  * Block here, instead of in ->writepage(), so that the userspace fs
2428  * can only block processes actually operating on the filesystem.
2429  *
2430  * Otherwise unprivileged userspace fs would be able to block
2431  * unrelated:
2432  *
2433  * - page migration
2434  * - sync(2)
2435  * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
2436  */
2437 static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
2438 {
2439 	struct page *page = vmf->page;
2440 	struct inode *inode = file_inode(vmf->vma->vm_file);
2441 
2442 	file_update_time(vmf->vma->vm_file);
2443 	lock_page(page);
2444 	if (page->mapping != inode->i_mapping) {
2445 		unlock_page(page);
2446 		return VM_FAULT_NOPAGE;
2447 	}
2448 
2449 	fuse_wait_on_page_writeback(inode, page->index);
2450 	return VM_FAULT_LOCKED;
2451 }
2452 
2453 static const struct vm_operations_struct fuse_file_vm_ops = {
2454 	.close		= fuse_vma_close,
2455 	.fault		= filemap_fault,
2456 	.map_pages	= filemap_map_pages,
2457 	.page_mkwrite	= fuse_page_mkwrite,
2458 };
2459 
2460 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
2461 {
2462 	struct fuse_file *ff = file->private_data;
2463 	struct fuse_conn *fc = ff->fm->fc;
2464 
2465 	/* DAX mmap is superior to direct_io mmap */
2466 	if (FUSE_IS_DAX(file_inode(file)))
2467 		return fuse_dax_mmap(file, vma);
2468 
2469 	if (ff->open_flags & FOPEN_DIRECT_IO) {
2470 		/* Can't provide the coherency needed for MAP_SHARED
2471 		 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
2472 		 */
2473 		if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
2474 			return -ENODEV;
2475 
2476 		invalidate_inode_pages2(file->f_mapping);
2477 
2478 		return generic_file_mmap(file, vma);
2479 	}
2480 
2481 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2482 		fuse_link_write_file(file);
2483 
2484 	file_accessed(file);
2485 	vma->vm_ops = &fuse_file_vm_ops;
2486 	return 0;
2487 }
2488 
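/*
 * Convert a lock reply from the server into a struct file_lock, sanity
 * checking the range and translating the pid from the connection's pid
 * namespace into init's (the locks API converts it to the caller's
 * namespace afterwards).
 */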
2489 static int convert_fuse_file_lock(struct fuse_conn *fc,
2490 				  const struct fuse_file_lock *ffl,
2491 				  struct file_lock *fl)
2492 {
2493 	switch (ffl->type) {
2494 	case F_UNLCK:
2495 		break;
2496 
2497 	case F_RDLCK:
2498 	case F_WRLCK:
2499 		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
2500 		    ffl->end < ffl->start)
2501 			return -EIO;
2502 
2503 		fl->fl_start = ffl->start;
2504 		fl->fl_end = ffl->end;
2505 
2506 		/*
2507 		 * Convert pid into init's pid namespace.  The locks API will
2508 		 * translate it into the caller's pid namespace.
2509 		 */
2510 		rcu_read_lock();
2511 		fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
2512 		rcu_read_unlock();
2513 		break;
2514 
2515 	default:
2516 		return -EIO;
2517 	}
2518 	fl->fl_type = ffl->type;
2519 	return 0;
2520 }
2521 
2522 static void fuse_lk_fill(struct fuse_args *args, struct file *file,
2523 			 const struct file_lock *fl, int opcode, pid_t pid,
2524 			 int flock, struct fuse_lk_in *inarg)
2525 {
2526 	struct inode *inode = file_inode(file);
2527 	struct fuse_conn *fc = get_fuse_conn(inode);
2528 	struct fuse_file *ff = file->private_data;
2529 
2530 	memset(inarg, 0, sizeof(*inarg));
2531 	inarg->fh = ff->fh;
2532 	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
2533 	inarg->lk.start = fl->fl_start;
2534 	inarg->lk.end = fl->fl_end;
2535 	inarg->lk.type = fl->fl_type;
2536 	inarg->lk.pid = pid;
2537 	if (flock)
2538 		inarg->lk_flags |= FUSE_LK_FLOCK;
2539 	args->opcode = opcode;
2540 	args->nodeid = get_node_id(inode);
2541 	args->in_numargs = 1;
2542 	args->in_args[0].size = sizeof(*inarg);
2543 	args->in_args[0].value = inarg;
2544 }
2545 
2546 static int fuse_getlk(struct file *file, struct file_lock *fl)
2547 {
2548 	struct inode *inode = file_inode(file);
2549 	struct fuse_mount *fm = get_fuse_mount(inode);
2550 	FUSE_ARGS(args);
2551 	struct fuse_lk_in inarg;
2552 	struct fuse_lk_out outarg;
2553 	int err;
2554 
2555 	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
2556 	args.out_numargs = 1;
2557 	args.out_args[0].size = sizeof(outarg);
2558 	args.out_args[0].value = &outarg;
2559 	err = fuse_simple_request(fm, &args);
2560 	if (!err)
2561 		err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);
2562 
2563 	return err;
2564 }
2565 
2566 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
2567 {
2568 	struct inode *inode = file_inode(file);
2569 	struct fuse_mount *fm = get_fuse_mount(inode);
2570 	FUSE_ARGS(args);
2571 	struct fuse_lk_in inarg;
2572 	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
2573 	struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
2574 	pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
2575 	int err;
2576 
2577 	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
2578 		/* NLM needs asynchronous locks, which we don't support yet */
2579 		return -ENOLCK;
2580 	}
2581 
2582 	/* Unlock on close is handled by the flush method */
2583 	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
2584 		return 0;
2585 
2586 	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
2587 	err = fuse_simple_request(fm, &args);
2588 
2589 	/* locking is restartable */
2590 	if (err == -EINTR)
2591 		err = -ERESTARTSYS;
2592 
2593 	return err;
2594 }
2595 
2596 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
2597 {
2598 	struct inode *inode = file_inode(file);
2599 	struct fuse_conn *fc = get_fuse_conn(inode);
2600 	int err;
2601 
2602 	if (cmd == F_CANCELLK) {
2603 		err = 0;
2604 	} else if (cmd == F_GETLK) {
2605 		if (fc->no_lock) {
2606 			posix_test_lock(file, fl);
2607 			err = 0;
2608 		} else
2609 			err = fuse_getlk(file, fl);
2610 	} else {
2611 		if (fc->no_lock)
2612 			err = posix_lock_file(file, fl, NULL);
2613 		else
2614 			err = fuse_setlk(file, fl, 0);
2615 	}
2616 	return err;
2617 }
2618 
2619 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
2620 {
2621 	struct inode *inode = file_inode(file);
2622 	struct fuse_conn *fc = get_fuse_conn(inode);
2623 	int err;
2624 
2625 	if (fc->no_flock) {
2626 		err = locks_lock_file_wait(file, fl);
2627 	} else {
2628 		struct fuse_file *ff = file->private_data;
2629 
2630 		/* emulate flock with POSIX locks */
2631 		ff->flock = true;
2632 		err = fuse_setlk(file, fl, 1);
2633 	}
2634 
2635 	return err;
2636 }
2637 
2638 static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
2639 {
2640 	struct inode *inode = mapping->host;
2641 	struct fuse_mount *fm = get_fuse_mount(inode);
2642 	FUSE_ARGS(args);
2643 	struct fuse_bmap_in inarg;
2644 	struct fuse_bmap_out outarg;
2645 	int err;
2646 
2647 	if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
2648 		return 0;
2649 
2650 	memset(&inarg, 0, sizeof(inarg));
2651 	inarg.block = block;
2652 	inarg.blocksize = inode->i_sb->s_blocksize;
2653 	args.opcode = FUSE_BMAP;
2654 	args.nodeid = get_node_id(inode);
2655 	args.in_numargs = 1;
2656 	args.in_args[0].size = sizeof(inarg);
2657 	args.in_args[0].value = &inarg;
2658 	args.out_numargs = 1;
2659 	args.out_args[0].size = sizeof(outarg);
2660 	args.out_args[0].value = &outarg;
2661 	err = fuse_simple_request(fm, &args);
2662 	if (err == -ENOSYS)
2663 		fm->fc->no_bmap = 1;
2664 
2665 	return err ? 0 : outarg.block;
2666 }
2667 
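/*
 * SEEK_HOLE/SEEK_DATA support via FUSE_LSEEK.  If the server does not
 * implement the opcode, fall back to generic_file_llseek() after
 * refreshing the file size.
 */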
2668 static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
2669 {
2670 	struct inode *inode = file->f_mapping->host;
2671 	struct fuse_mount *fm = get_fuse_mount(inode);
2672 	struct fuse_file *ff = file->private_data;
2673 	FUSE_ARGS(args);
2674 	struct fuse_lseek_in inarg = {
2675 		.fh = ff->fh,
2676 		.offset = offset,
2677 		.whence = whence
2678 	};
2679 	struct fuse_lseek_out outarg;
2680 	int err;
2681 
2682 	if (fm->fc->no_lseek)
2683 		goto fallback;
2684 
2685 	args.opcode = FUSE_LSEEK;
2686 	args.nodeid = ff->nodeid;
2687 	args.in_numargs = 1;
2688 	args.in_args[0].size = sizeof(inarg);
2689 	args.in_args[0].value = &inarg;
2690 	args.out_numargs = 1;
2691 	args.out_args[0].size = sizeof(outarg);
2692 	args.out_args[0].value = &outarg;
2693 	err = fuse_simple_request(fm, &args);
2694 	if (err) {
2695 		if (err == -ENOSYS) {
2696 			fm->fc->no_lseek = 1;
2697 			goto fallback;
2698 		}
2699 		return err;
2700 	}
2701 
2702 	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);
2703 
2704 fallback:
2705 	err = fuse_update_attributes(inode, file, STATX_SIZE);
2706 	if (!err)
2707 		return generic_file_llseek(file, offset, whence);
2708 	else
2709 		return err;
2710 }
2711 
2712 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
2713 {
2714 	loff_t retval;
2715 	struct inode *inode = file_inode(file);
2716 
2717 	switch (whence) {
2718 	case SEEK_SET:
2719 	case SEEK_CUR:
2720 		 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
2721 		retval = generic_file_llseek(file, offset, whence);
2722 		break;
2723 	case SEEK_END:
2724 		inode_lock(inode);
2725 		retval = fuse_update_attributes(inode, file, STATX_SIZE);
2726 		if (!retval)
2727 			retval = generic_file_llseek(file, offset, whence);
2728 		inode_unlock(inode);
2729 		break;
2730 	case SEEK_HOLE:
2731 	case SEEK_DATA:
2732 		inode_lock(inode);
2733 		retval = fuse_lseek(file, offset, whence);
2734 		inode_unlock(inode);
2735 		break;
2736 	default:
2737 		retval = -EINVAL;
2738 	}
2739 
2740 	return retval;
2741 }
2742 
2743 /*
2744  * All files that have been polled are linked into the RB tree
2745  * fuse_conn->polled_files, which is indexed by kh.  Walk the tree and
2746  * find the matching one.
2747  */
2748 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
2749 					      struct rb_node **parent_out)
2750 {
2751 	struct rb_node **link = &fc->polled_files.rb_node;
2752 	struct rb_node *last = NULL;
2753 
2754 	while (*link) {
2755 		struct fuse_file *ff;
2756 
2757 		last = *link;
2758 		ff = rb_entry(last, struct fuse_file, polled_node);
2759 
2760 		if (kh < ff->kh)
2761 			link = &last->rb_left;
2762 		else if (kh > ff->kh)
2763 			link = &last->rb_right;
2764 		else
2765 			return link;
2766 	}
2767 
2768 	if (parent_out)
2769 		*parent_out = last;
2770 	return link;
2771 }
2772 
2773 /*
2774  * The file is about to be polled.  Make sure it's on the polled_files
2775  * RB tree.  Note that files once added to the polled_files tree are
2776  * not removed before the file is released.  This is because a file
2777  * polled once is likely to be polled again.
2778  */
2779 static void fuse_register_polled_file(struct fuse_conn *fc,
2780 				      struct fuse_file *ff)
2781 {
2782 	spin_lock(&fc->lock);
2783 	if (RB_EMPTY_NODE(&ff->polled_node)) {
2784 		struct rb_node **link, *parent;
2785 
2786 		link = fuse_find_polled_node(fc, ff->kh, &parent);
2787 		BUG_ON(*link);
2788 		rb_link_node(&ff->polled_node, parent, link);
2789 		rb_insert_color(&ff->polled_node, &fc->polled_files);
2790 	}
2791 	spin_unlock(&fc->lock);
2792 }
2793 
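/*
 * ->poll() implementation: query the server's event mask with FUSE_POLL.
 * If someone is actually waiting, also request a FUSE_NOTIFY_POLL wakeup
 * and register the file so that fuse_notify_poll_wakeup() can find it.
 */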
2794 __poll_t fuse_file_poll(struct file *file, poll_table *wait)
2795 {
2796 	struct fuse_file *ff = file->private_data;
2797 	struct fuse_mount *fm = ff->fm;
2798 	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
2799 	struct fuse_poll_out outarg;
2800 	FUSE_ARGS(args);
2801 	int err;
2802 
2803 	if (fm->fc->no_poll)
2804 		return DEFAULT_POLLMASK;
2805 
2806 	poll_wait(file, &ff->poll_wait, wait);
2807 	inarg.events = mangle_poll(poll_requested_events(wait));
2808 
2809 	/*
2810 	 * Ask for notification iff there's someone waiting for it.
2811 	 * The client may ignore the flag and always notify.
2812 	 */
2813 	if (waitqueue_active(&ff->poll_wait)) {
2814 		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
2815 		fuse_register_polled_file(fm->fc, ff);
2816 	}
2817 
2818 	args.opcode = FUSE_POLL;
2819 	args.nodeid = ff->nodeid;
2820 	args.in_numargs = 1;
2821 	args.in_args[0].size = sizeof(inarg);
2822 	args.in_args[0].value = &inarg;
2823 	args.out_numargs = 1;
2824 	args.out_args[0].size = sizeof(outarg);
2825 	args.out_args[0].value = &outarg;
2826 	err = fuse_simple_request(fm, &args);
2827 
2828 	if (!err)
2829 		return demangle_poll(outarg.revents);
2830 	if (err == -ENOSYS) {
2831 		fm->fc->no_poll = 1;
2832 		return DEFAULT_POLLMASK;
2833 	}
2834 	return EPOLLERR;
2835 }
2836 EXPORT_SYMBOL_GPL(fuse_file_poll);
2837 
2838 /*
2839  * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
2840  * wakes up the poll waiters.
2841  */
2842 int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2843 			    struct fuse_notify_poll_wakeup_out *outarg)
2844 {
2845 	u64 kh = outarg->kh;
2846 	struct rb_node **link;
2847 
2848 	spin_lock(&fc->lock);
2849 
2850 	link = fuse_find_polled_node(fc, kh, NULL);
2851 	if (*link) {
2852 		struct fuse_file *ff;
2853 
2854 		ff = rb_entry(*link, struct fuse_file, polled_node);
2855 		wake_up_interruptible_sync(&ff->poll_wait);
2856 	}
2857 
2858 	spin_unlock(&fc->lock);
2859 	return 0;
2860 }
2861 
2862 static void fuse_do_truncate(struct file *file)
2863 {
2864 	struct inode *inode = file->f_mapping->host;
2865 	struct iattr attr;
2866 
2867 	attr.ia_valid = ATTR_SIZE;
2868 	attr.ia_size = i_size_read(inode);
2869 
2870 	attr.ia_file = file;
2871 	attr.ia_valid |= ATTR_FILE;
2872 
2873 	fuse_do_setattr(file_dentry(file), &attr, file);
2874 }
2875 
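/*
 * Round up to a multiple of the maximum request payload
 * (fc->max_pages << PAGE_SHIFT); used below to keep the shortened
 * direct read aligned to whole requests.
 */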
2876 static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
2877 {
2878 	return round_up(off, fc->max_pages << PAGE_SHIFT);
2879 }
2880 
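/*
 * ->direct_IO() for O_DIRECT reads and writes.  With async_dio the
 * requests are submitted in the background and a sync kiocb waits on the
 * completion; size-extending writes are forced to be blocking, since
 * i_size cannot be updated asynchronously (see the io->blocking logic
 * below).
 */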
2881 static ssize_t
2882 fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2883 {
2884 	DECLARE_COMPLETION_ONSTACK(wait);
2885 	ssize_t ret = 0;
2886 	struct file *file = iocb->ki_filp;
2887 	struct fuse_file *ff = file->private_data;
2888 	loff_t pos = 0;
2889 	struct inode *inode;
2890 	loff_t i_size;
2891 	size_t count = iov_iter_count(iter), shortened = 0;
2892 	loff_t offset = iocb->ki_pos;
2893 	struct fuse_io_priv *io;
2894 
2895 	pos = offset;
2896 	inode = file->f_mapping->host;
2897 	i_size = i_size_read(inode);
2898 
2899 	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
2900 		return 0;
2901 
2902 	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
2903 	if (!io)
2904 		return -ENOMEM;
2905 	spin_lock_init(&io->lock);
2906 	kref_init(&io->refcnt);
2907 	io->reqs = 1;
2908 	io->bytes = -1;
2909 	io->size = 0;
2910 	io->offset = offset;
2911 	io->write = (iov_iter_rw(iter) == WRITE);
2912 	io->err = 0;
2913 	/*
2914 	 * By default, we want to optimize all I/Os with async request
2915 	 * submission to the client filesystem if supported.
2916 	 */
2917 	io->async = ff->fm->fc->async_dio;
2918 	io->iocb = iocb;
2919 	io->blocking = is_sync_kiocb(iocb);
2920 
2921 	/* optimization for short read */
2922 	if (io->async && !io->write && offset + count > i_size) {
2923 		iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
2924 		shortened = count - iov_iter_count(iter);
2925 		count -= shortened;
2926 	}
2927 
2928 	/*
2929 	 * We cannot asynchronously extend the size of a file.  In such a case
2930 	 * the AIO will behave exactly like synchronous IO.
2931 	 */
2932 	if ((offset + count > i_size) && io->write)
2933 		io->blocking = true;
2934 
2935 	if (io->async && io->blocking) {
2936 		/*
2937 		 * Additional reference to keep io around after
2938 		 * calling fuse_aio_complete()
2939 		 */
2940 		kref_get(&io->refcnt);
2941 		io->done = &wait;
2942 	}
2943 
2944 	if (iov_iter_rw(iter) == WRITE) {
2945 		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
2946 		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
2947 	} else {
2948 		ret = __fuse_direct_read(io, iter, &pos);
2949 	}
2950 	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
2951 
2952 	if (io->async) {
2953 		bool blocking = io->blocking;
2954 
2955 		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2956 
2957 		/* we have a non-extending, async request, so return */
2958 		if (!blocking)
2959 			return -EIOCBQUEUED;
2960 
2961 		wait_for_completion(&wait);
2962 		ret = fuse_get_res_by_io(io);
2963 	}
2964 
2965 	kref_put(&io->refcnt, fuse_io_release);
2966 
2967 	if (iov_iter_rw(iter) == WRITE) {
2968 		fuse_write_update_attr(inode, pos, ret);
2969 		/* For extending writes we already hold exclusive lock */
2970 		if (ret < 0 && offset + count > i_size)
2971 			fuse_do_truncate(file);
2972 	}
2973 
2974 	return ret;
2975 }
2976 
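/*
 * Note that writeback is started from @start all the way up to LLONG_MAX
 * rather than just to @end; this looks deliberate, presumably so that
 * in-flight size-extending writes beyond the requested range are flushed
 * as well before the server-side operation runs.
 */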
2977 static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
2978 {
2979 	int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
2980 
2981 	if (!err)
2982 		fuse_sync_writes(inode);
2983 
2984 	return err;
2985 }
2986 
2987 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2988 				loff_t length)
2989 {
2990 	struct fuse_file *ff = file->private_data;
2991 	struct inode *inode = file_inode(file);
2992 	struct fuse_inode *fi = get_fuse_inode(inode);
2993 	struct fuse_mount *fm = ff->fm;
2994 	FUSE_ARGS(args);
2995 	struct fuse_fallocate_in inarg = {
2996 		.fh = ff->fh,
2997 		.offset = offset,
2998 		.length = length,
2999 		.mode = mode
3000 	};
3001 	int err;
3002 	bool block_faults = FUSE_IS_DAX(inode) &&
3003 		(!(mode & FALLOC_FL_KEEP_SIZE) ||
3004 		 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
3005 
3006 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3007 		     FALLOC_FL_ZERO_RANGE))
3008 		return -EOPNOTSUPP;
3009 
3010 	if (fm->fc->no_fallocate)
3011 		return -EOPNOTSUPP;
3012 
3013 	inode_lock(inode);
3014 	if (block_faults) {
3015 		filemap_invalidate_lock(inode->i_mapping);
3016 		err = fuse_dax_break_layouts(inode, 0, 0);
3017 		if (err)
3018 			goto out;
3019 	}
3020 
3021 	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
3022 		loff_t endbyte = offset + length - 1;
3023 
3024 		err = fuse_writeback_range(inode, offset, endbyte);
3025 		if (err)
3026 			goto out;
3027 	}
3028 
3029 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3030 	    offset + length > i_size_read(inode)) {
3031 		err = inode_newsize_ok(inode, offset + length);
3032 		if (err)
3033 			goto out;
3034 	}
3035 
3036 	err = file_modified(file);
3037 	if (err)
3038 		goto out;
3039 
3040 	if (!(mode & FALLOC_FL_KEEP_SIZE))
3041 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3042 
3043 	args.opcode = FUSE_FALLOCATE;
3044 	args.nodeid = ff->nodeid;
3045 	args.in_numargs = 1;
3046 	args.in_args[0].size = sizeof(inarg);
3047 	args.in_args[0].value = &inarg;
3048 	err = fuse_simple_request(fm, &args);
3049 	if (err == -ENOSYS) {
3050 		fm->fc->no_fallocate = 1;
3051 		err = -EOPNOTSUPP;
3052 	}
3053 	if (err)
3054 		goto out;
3055 
3056 	/* we could have extended the file */
3057 	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
3058 		if (fuse_write_update_attr(inode, offset + length, length))
3059 			file_update_time(file);
3060 	}
3061 
3062 	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
3063 		truncate_pagecache_range(inode, offset, offset + length - 1);
3064 
3065 	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
3066 
3067 out:
3068 	if (!(mode & FALLOC_FL_KEEP_SIZE))
3069 		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3070 
3071 	if (block_faults)
3072 		filemap_invalidate_unlock(inode->i_mapping);
3073 
3074 	inode_unlock(inode);
3075 
3076 	fuse_flush_time_update(inode);
3077 
3078 	return err;
3079 }
3080 
3081 static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
3082 				      struct file *file_out, loff_t pos_out,
3083 				      size_t len, unsigned int flags)
3084 {
3085 	struct fuse_file *ff_in = file_in->private_data;
3086 	struct fuse_file *ff_out = file_out->private_data;
3087 	struct inode *inode_in = file_inode(file_in);
3088 	struct inode *inode_out = file_inode(file_out);
3089 	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
3090 	struct fuse_mount *fm = ff_in->fm;
3091 	struct fuse_conn *fc = fm->fc;
3092 	FUSE_ARGS(args);
3093 	struct fuse_copy_file_range_in inarg = {
3094 		.fh_in = ff_in->fh,
3095 		.off_in = pos_in,
3096 		.nodeid_out = ff_out->nodeid,
3097 		.fh_out = ff_out->fh,
3098 		.off_out = pos_out,
3099 		.len = len,
3100 		.flags = flags
3101 	};
3102 	struct fuse_write_out outarg;
3103 	ssize_t err;
3104 	/* Mark the size unstable when write-back is not used and file_out
3105 	 * gets extended */
3106 	bool is_unstable = (!fc->writeback_cache) &&
3107 			   ((pos_out + len) > inode_out->i_size);
3108 
3109 	if (fc->no_copy_file_range)
3110 		return -EOPNOTSUPP;
3111 
3112 	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
3113 		return -EXDEV;
3114 
3115 	inode_lock(inode_in);
3116 	err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
3117 	inode_unlock(inode_in);
3118 	if (err)
3119 		return err;
3120 
3121 	inode_lock(inode_out);
3122 
3123 	err = file_modified(file_out);
3124 	if (err)
3125 		goto out;
3126 
3127 	/*
3128 	 * Write out dirty pages in the destination file before sending the COPY
3129 	 * request to userspace.  After the request is completed, truncate off
3130 	 * pages (including partial ones) from the cache that have been copied,
3131 	 * since these contain stale data at that point.
3132 	 *
3133 	 * This should be mostly correct, but if the COPY writes to partial
3134 	 * pages (at the start or end) and the parts not covered by the COPY are
3135 	 * written through a memory map after calling fuse_writeback_range(),
3136 	 * then these partial page modifications will be lost on truncation.
3137 	 *
3138 	 * It is unlikely that someone would rely on such mixed style
3139 	 * modifications.  Yet this does give less guarantees than if the
3140 	 * copying was performed with write(2).
3141 	 *
3142 	 * To fix this a mapping->invalidate_lock could be used to prevent new
3143 	 * faults while the copy is ongoing.
3144 	 */
3145 	err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
3146 	if (err)
3147 		goto out;
3148 
3149 	if (is_unstable)
3150 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3151 
3152 	args.opcode = FUSE_COPY_FILE_RANGE;
3153 	args.nodeid = ff_in->nodeid;
3154 	args.in_numargs = 1;
3155 	args.in_args[0].size = sizeof(inarg);
3156 	args.in_args[0].value = &inarg;
3157 	args.out_numargs = 1;
3158 	args.out_args[0].size = sizeof(outarg);
3159 	args.out_args[0].value = &outarg;
3160 	err = fuse_simple_request(fm, &args);
3161 	if (err == -ENOSYS) {
3162 		fc->no_copy_file_range = 1;
3163 		err = -EOPNOTSUPP;
3164 	}
3165 	if (err)
3166 		goto out;
3167 
3168 	truncate_inode_pages_range(inode_out->i_mapping,
3169 				   ALIGN_DOWN(pos_out, PAGE_SIZE),
3170 				   ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
3171 
3172 	file_update_time(file_out);
3173 	fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);
3174 
3175 	err = outarg.size;
3176 out:
3177 	if (is_unstable)
3178 		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3179 
3180 	inode_unlock(inode_out);
3181 	file_accessed(file_in);
3182 
3183 	fuse_flush_time_update(inode_out);
3184 
3185 	return err;
3186 }
3187 
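/*
 * If the server does not support FUSE_COPY_FILE_RANGE (-EOPNOTSUPP) or
 * the two files live on different superblocks (-EXDEV), fall back to the
 * generic page cache based copy.
 */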
3188 static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
3189 				    struct file *dst_file, loff_t dst_off,
3190 				    size_t len, unsigned int flags)
3191 {
3192 	ssize_t ret;
3193 
3194 	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
3195 				     len, flags);
3196 
3197 	if (ret == -EOPNOTSUPP || ret == -EXDEV)
3198 		ret = generic_copy_file_range(src_file, src_off, dst_file,
3199 					      dst_off, len, flags);
3200 	return ret;
3201 }
3202 
3203 static const struct file_operations fuse_file_operations = {
3204 	.llseek		= fuse_file_llseek,
3205 	.read_iter	= fuse_file_read_iter,
3206 	.write_iter	= fuse_file_write_iter,
3207 	.mmap		= fuse_file_mmap,
3208 	.open		= fuse_open,
3209 	.flush		= fuse_flush,
3210 	.release	= fuse_release,
3211 	.fsync		= fuse_fsync,
3212 	.lock		= fuse_file_lock,
3213 	.get_unmapped_area = thp_get_unmapped_area,
3214 	.flock		= fuse_file_flock,
3215 	.splice_read	= filemap_splice_read,
3216 	.splice_write	= iter_file_splice_write,
3217 	.unlocked_ioctl	= fuse_file_ioctl,
3218 	.compat_ioctl	= fuse_file_compat_ioctl,
3219 	.poll		= fuse_file_poll,
3220 	.fallocate	= fuse_file_fallocate,
3221 	.copy_file_range = fuse_copy_file_range,
3222 };
3223 
3224 static const struct address_space_operations fuse_file_aops  = {
3225 	.read_folio	= fuse_read_folio,
3226 	.readahead	= fuse_readahead,
3227 	.writepage	= fuse_writepage,
3228 	.writepages	= fuse_writepages,
3229 	.launder_folio	= fuse_launder_folio,
3230 	.dirty_folio	= filemap_dirty_folio,
3231 	.bmap		= fuse_bmap,
3232 	.direct_IO	= fuse_direct_IO,
3233 	.write_begin	= fuse_write_begin,
3234 	.write_end	= fuse_write_end,
3235 };
3236 
3237 void fuse_init_file_inode(struct inode *inode, unsigned int flags)
3238 {
3239 	struct fuse_inode *fi = get_fuse_inode(inode);
3240 
3241 	inode->i_fop = &fuse_file_operations;
3242 	inode->i_data.a_ops = &fuse_file_aops;
3243 
3244 	INIT_LIST_HEAD(&fi->write_files);
3245 	INIT_LIST_HEAD(&fi->queued_writes);
3246 	fi->writectr = 0;
3247 	init_waitqueue_head(&fi->page_waitq);
3248 	fi->writepages = RB_ROOT;
3249 
3250 	if (IS_ENABLED(CONFIG_FUSE_DAX))
3251 		fuse_dax_inode_init(inode, flags);
3252 }
3253