/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>

static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
			  unsigned int open_flags, int opcode,
			  struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;

	if (fm->fc->handle_killpriv_v2 &&
	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}

	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fm, &args);
}

struct fuse_release_args {
	struct fuse_args args;
	struct fuse_release_in inarg;
	struct inode *inode;
};

struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fm = fm;
	ff->release_args = kzalloc(sizeof(*ff->release_args),
				   GFP_KERNEL_ACCOUNT);
	if (!ff->release_args) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	mutex_init(&ff->readdir.lock);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->release_args);
	mutex_destroy(&ff->readdir.lock);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
	kfree(ra);
}

static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_args *args = &ff->release_args->args;

		if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
			/* Do nothing when client does not implement 'open' */
			fuse_release_end(ff->fm, args, 0);
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}

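/*
 * Open a file or directory on the server.  If the server replies
 * -ENOSYS, the fact that open is not implemented is cached
 * (fc->no_open / fc->no_opendir) and the local defaults below
 * (FOPEN_KEEP_CACHE, plus FOPEN_CACHE_DIR for directories) are used,
 * so subsequent opens skip the round trip entirely.
 */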
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fm);
	if (!ff)
		return ERR_PTR(-ENOMEM);

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (isdir ? !fc->no_opendir : !fc->no_open) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fm, nodeid, open_flags, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return ERR_PTR(err);
		} else {
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;

	return ff;
}

int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);

	if (!IS_ERR(ff))
		file->private_data = ff;

	return PTR_ERR_OR_ZERO(ff);
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fi->lock);
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, 0);
		spin_unlock(&fi->lock);
		file_update_time(file);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}

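/*
 * Opening with O_TRUNC needs care: when writeback caching or DAX is in
 * effect, the inode lock is held and writes are parked with
 * fuse_set_nowrite() so the truncation cannot race with in-flight
 * writeback, and the local page cache is truncated only once the open
 * has succeeded.
 */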
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn *fc = fm->fc;
	int err;
	bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
			      fc->atomic_o_trunc &&
			      fc->writeback_cache;
	bool dax_truncate = (file->f_flags & O_TRUNC) &&
			    fc->atomic_o_trunc && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate)
		inode_lock(inode);

	if (dax_truncate) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out_inode_unlock;
	}

	if (is_wb_truncate || dax_truncate)
		fuse_set_nowrite(inode);

	err = fuse_do_open(fm, get_node_id(inode), file, isdir);
	if (!err)
		fuse_finish_open(inode, file);

	if (is_wb_truncate || dax_truncate)
		fuse_release_nowrite(inode);
	if (!err) {
		struct fuse_file *ff = file->private_data;

		if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC))
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	if (dax_truncate)
		filemap_invalidate_unlock(inode->i_mapping);
out_inode_unlock:
	if (is_wb_truncate || dax_truncate)
		inode_unlock(inode);

	return err;
}

static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 unsigned int flags, int opcode)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = ff->release_args;

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;
}

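/*
 * Queue the RELEASE/RELEASEDIR request for this fuse_file.  The inode
 * is pinned with igrab() until the reply arrives, and an flock-style
 * lock held through this file is dropped server-side via
 * FUSE_RELEASE_FLOCK_UNLOCK.
 */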
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_release_args *ra = ff->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, open_flags, opcode);

	if (ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
	}
	/* Hold inode until release is finished */
	ra->inode = igrab(inode);

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy, isdir);
}

void fuse_release_common(struct file *file, bool isdir)
{
	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
			  (fl_owner_t) file, isdir);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * Dirty pages might remain despite write_inode_now() call from
	 * fuse_flush() due to writes racing with the close.
	 */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
	/*
	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
	 * synchronous, we are fine with not doing igrab() here
	 */
	fuse_file_put(ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

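/*
 * A writepage request: 'ia' holds the FUSE_WRITE arguments and the
 * temporary pages, writepages_entry links it into fi->writepages (an
 * rb-tree of in-flight page ranges), queue_entry into
 * fi->queued_writes, and 'next' chains auxiliary requests that target
 * pages already under writeback.  'bucket' ties the request to a
 * syncfs bucket when the server implements sync_fs.
 */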
struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
	struct fuse_sync_bucket *bucket;
};

static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
						pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}
	return NULL;
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

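/*
 * FUSE_FLUSH is sent for every close() of a file descriptor (unless
 * the server asked for FOPEN_NOFLUSH and no writeback cache is in
 * use), after dirty data has been written back, so the server gets a
 * chance to report close-time errors.  The request carries the
 * scrambled lock owner so the server can tell which opener is closing.
 */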
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;
	FUSE_ARGS(args);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.force = true;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * The in-memory i_blocks is not maintained by fuse; if writeback
	 * cache is enabled, i_blocks from the cached attr may not be
	 * accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to the implementation of fuse writeback,
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes()
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}

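/*
 * Build a FUSE_READ (or FUSE_READDIR) request.  out_argvar marks the
 * reply payload as variable length, so the server may legitimately
 * return fewer bytes than requested (a short read).
 */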
void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		put_page(ap->pages[i]);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}

/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ?: err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}

static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}

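/*
 * Queue one asynchronous request on behalf of 'io': the submission
 * count and byte total are bumped under io->lock, and on submission
 * failure the completion callback is invoked by hand so that the
 * accounting is unwound exactly as if the request had failed in
 * flight.
 */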
static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}

static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver >= fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}

static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * If writeback_cache is enabled, a short read means there's a hole in
	 * the file. Some data after the hole is in page cache, but has not
	 * reached the client fs yet. So the hole is not present there.
	 */
	if (!fc->writeback_cache) {
		loff_t pos = page_offset(ap->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}

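/*
 * Read a single page synchronously.  The attribute version is sampled
 * before the request is sent, so a short read (EOF) only shrinks the
 * cached i_size if no newer attributes arrived in the meantime.
 */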
static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_pages = 1,
		.ap.pages = &page,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fm->fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fm, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);

	return 0;
}

static int fuse_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (!err)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		put_page(page);
	}
	if (ia->ff)
		fuse_file_put(ia->ff, false, false);

	fuse_io_free(ia);
}

static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fm, &ap->args, err);
}

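/*
 * Fill the readahead window in batches of at most max_pages (further
 * capped by max_read); each batch becomes one FUSE_READ request.  When
 * the connection is congested, only the strictly-needed synchronous
 * part of the window is submitted and the speculative tail is dropped.
 */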
static void fuse_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned int i, max_pages, nr_pages = 0;

	if (fuse_is_bad(inode))
		return;

	max_pages = min_t(unsigned int, fc->max_pages,
			  fc->max_read / PAGE_SIZE);

	for (;;) {
		struct fuse_io_args *ia;
		struct fuse_args_pages *ap;

		if (fc->num_background >= fc->congestion_threshold &&
		    rac->ra->async_size >= readahead_count(rac))
			/*
			 * Congested and only async pages left, so skip the
			 * rest.
			 */
			break;

		nr_pages = readahead_count(rac) - nr_pages;
		if (nr_pages > max_pages)
			nr_pages = max_pages;
		if (nr_pages == 0)
			break;
		ia = fuse_io_alloc(NULL, nr_pages);
		if (!ia)
			return;
		ap = &ia->ap;
		nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
		for (i = 0; i < nr_pages; i++) {
			fuse_wait_on_page_writeback(inode,
						    readahead_index(rac) + i);
			ap->descs[i].length = PAGE_SIZE;
		}
		ap->num_pages = nr_pages;
		fuse_send_readpages(ia, rac->file);
	}
}

static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}

static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fm->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}

static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb_is_dsync(iocb))
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}

static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	err = fuse_simple_request(fm, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}

bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (written > 0 && pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

	return ret;
}

static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	unsigned int offset, i;
	bool short_write;
	int err;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);
	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

	err = fuse_simple_request(fm, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	short_write = ia->write.out.size < count;

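	/*
	 * Walk the pages just sent: 'count' tracks how much of the reply
	 * covers each page in turn.  Pages that the server did not write
	 * in full lose their Uptodate bit, since the cached copy no
	 * longer matches what reached the server.
	 */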
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (err) {
			ClearPageUptodate(page);
		} else {
			if (count >= PAGE_SIZE - offset)
				count -= PAGE_SIZE - offset;
			else {
				if (short_write)
					ClearPageUptodate(page);
				count = 0;
			}
			offset = 0;
		}
		if (ia->write.page_locked && (i == ap->num_pages - 1))
			unlock_page(page);
		put_page(page);
	}

	return err;
}

static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos,
				     unsigned int max_pages)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	ap->args.in_pages = true;
	ap->descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

again:
		err = -EFAULT;
		if (fault_in_iov_iter_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		err = 0;
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].length = tmp;
		ap->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		/* If we copied full page, mark it uptodate */
		if (tmp == PAGE_SIZE)
			SetPageUptodate(page);

		if (PageUptodate(page)) {
			unlock_page(page);
		} else {
			ia->write.page_locked = true;
			break;
		}
		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 ap->num_pages < max_pages && offset == 0);

	return count > 0 ? count : err;
}

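/*
 * Number of page-cache pages touched by a write of 'len' bytes at file
 * offset 'pos', clamped to max_pages.  For example, a 2-byte write at
 * pos == PAGE_SIZE - 1 straddles a page boundary and needs two pages.
 */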
static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}

static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t pos = iocb->ki_pos;
	int err = 0;
	ssize_t res = 0;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		ssize_t count;
		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
						      fc->max_pages);

		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
		if (!ap->pages) {
			err = -ENOMEM;
			break;
		}

		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
		if (count <= 0) {
			err = count;
		} else {
			err = fuse_send_write_pages(&ia, iocb, inode,
						    pos, count);
			if (!err) {
				size_t num_written = ia.write.out.size;

				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		kfree(ap->pages);
	} while (!err && iov_iter_count(ii));

	fuse_write_update_attr(inode, pos, res);
	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (!res)
		return err;
	iocb->ki_pos += res;
	return res;
}

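/*
 * Two paths for cached writes: with writeback_cache the data only goes
 * into the page cache via generic_file_write_iter() and is sent later
 * by writeback; otherwise (or when suid/sgid must be dropped
 * server-side under handle_killpriv_v2) the write-through path below
 * copies the data into the page cache and synchronously sends it to
 * the server.
 */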
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file,
					     STATX_SIZE | STATX_MODE);
		if (err)
			return err;

		if (fc->handle_killpriv_v2 &&
		    setattr_should_drop_suidgid(&nop_mnt_idmap,
						file_inode(file))) {
			goto writethrough;
		}

		return generic_file_write_iter(iocb, from);
	}

writethrough:
	inode_lock(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;
		written = direct_write_fallback(iocb, from, written,
				fuse_perform_write(iocb, from));
	} else {
		written = fuse_perform_write(iocb, from);
	}
out:
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			ap->args.in_args[1].value = (void *) user_addr;
		else
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		unsigned npages;
		size_t start;
		ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
					  *nbytesp - nbytes,
					  max_pages - ap->num_pages,
					  &start);
		if (ret < 0)
			break;

		nbytes += ret;

		ret += start;
		npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	ap->args.user_pages = true;
	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}

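/*
 * Core of the O_DIRECT/FOPEN_DIRECT_IO path: the iterator is carved
 * into requests of at most max_write/max_read bytes, user pages are
 * pinned into each request, and requests are sent either synchronously
 * or, for async IO, accounted against io->reqs and completed through
 * fuse_aio_complete_req().  A short reply from the server terminates
 * the loop with the iterator reverted to the unconsumed tail.
 */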
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;
	bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	if (fopen_direct_io && fc->direct_io_allow_mmap) {
		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}
	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	if (fopen_direct_io && write) {
		res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}

	io->should_dirty = !write && user_backed_iter(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}

static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}

static bool fuse_direct_write_extending_i_size(struct kiocb *iocb,
					       struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;
	bool exclusive_lock =
		!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) ||
		get_fuse_conn(inode)->direct_io_allow_mmap ||
		iocb->ki_flags & IOCB_APPEND ||
		fuse_direct_write_extending_i_size(iocb, from);

	/*
	 * Take exclusive lock if
	 * - Parallel direct writes are disabled - a user space decision
	 * - Parallel direct writes are enabled and i_size is being extended.
	 * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP).
	 *   This might not be needed at all, but needs further investigation.
	 */
	if (exclusive_lock)
		inode_lock(inode);
	else {
		inode_lock_shared(inode);

		/*
		 * A race with truncate might have come up as the decision for
		 * the lock type was done without holding the lock, check again.
		 */
		if (fuse_direct_write_extending_i_size(iocb, from)) {
			inode_unlock_shared(inode);
			inode_lock(inode);
			exclusive_lock = true;
		}
	}

	res = generic_write_checks(iocb, from);
	if (res > 0) {
		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
			res = fuse_direct_IO(iocb, from);
		} else {
			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
			fuse_write_update_attr(inode, iocb->ki_pos, res);
		}
	}
	if (exclusive_lock)
		inode_unlock(inode);
	else
		inode_unlock_shared(inode);

	return res;
}

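/*
 * Top-level read/write dispatch: DAX inodes take the DAX paths,
 * FOPEN_DIRECT_IO files bypass the page cache entirely, and everything
 * else goes through the cached paths above.
 */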
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_read_iter(iocb, to);
	else
		return fuse_direct_read_iter(iocb, to);
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_cache_write_iter(iocb, from);
	else
		return fuse_direct_write_iter(iocb, from);
}

static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	if (wpa->bucket)
		fuse_sync_bucket_dec(wpa->bucket);

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

	if (wpa->ia.ff)
		fuse_file_put(wpa->ia.ff, false, false);

	kfree(ap->pages);
	kfree(wpa);
}

static void fuse_writepage_finish(struct fuse_mount *fm,
				  struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	for (i = 0; i < ap->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}

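/*
 * The request is cropped against the current file size before it is
 * sent, so data past a concurrent truncate is never written back; a
 * request truncated off completely is finished without a round trip.
 * The GFP_ATOMIC allocation may fail, in which case fi->lock is
 * dropped so the __GFP_NOFAIL retry is allowed to sleep.
 */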
/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_mount *fm,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
	int err;

	fi->writectr++;
	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	args->in_args[1].size = inarg->size;
	args->force = true;
	args->nocreds = true;

	err = fuse_simple_background(fm, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);
	}

	/* Fails on broken connection only */
	if (unlikely(err))
		goto out_free;

	return;

out_free:
	fi->writectr--;
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);

	/* After rb_erase() aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		struct backing_dev_info *bdi = inode_to_bdi(aux->inode);

		next = aux->next;
		aux->next = NULL;

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(aux);
	}

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fi->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t crop = i_size_read(inode);
	struct fuse_writepage_args *wpa;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		wpa = list_entry(fi->queued_writes.next,
				 struct fuse_writepage_args, queue_entry);
		list_del_init(&wpa->queue_entry);
		fuse_send_writepage(fm, wpa, crop);
	}
}

static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
						struct fuse_writepage_args *wpa)
{
	pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
	pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(!wpa->ia.ap.num_pages);
	while (*p) {
		struct fuse_writepage_args *curr;
		pgoff_t curr_index;

		parent = *p;
		curr = rb_entry(parent, struct fuse_writepage_args,
				writepages_entry);
		WARN_ON(curr->inode != wpa->inode);
		curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;

		if (idx_from >= curr_index + curr->ia.ap.num_pages)
			p = &(*p)->rb_right;
		else if (idx_to < curr_index)
			p = &(*p)->rb_left;
		else
			return curr;
	}

	rb_link_node(&wpa->writepages_entry, parent, p);
	rb_insert_color(&wpa->writepages_entry, root);
	return NULL;
}

static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
{
	WARN_ON(fuse_insert_writeback(root, wpa));
}

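/*
 * fi->writepages thus only ever holds non-overlapping primary
 * requests; a request for a range already in flight is instead chained
 * onto the primary's ->next list by fuse_writepage_add(), and re-sent
 * from fuse_writepage_end() once the primary completes.
 */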
static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_writepage_args *wpa =
		container_of(args, typeof(*wpa), ia.ap.args);
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);

	mapping_set_error(inode->i_mapping, error);
	/*
	 * A writeback finished and this might have updated mtime/ctime on
	 * server making local mtime/ctime stale.  Hence invalidate attrs.
	 * Do this only if writeback_cache is not enabled.  If writeback_cache
	 * is enabled, we trust local ctime/mtime.
	 */
	if (!fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
	spin_lock(&fi->lock);
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	while (wpa->next) {
		struct fuse_mount *fm = get_fuse_mount(inode);
		struct fuse_write_in *inarg = &wpa->ia.write.in;
		struct fuse_writepage_args *next = wpa->next;

		wpa->next = next->next;
		next->next = NULL;
		next->ia.ff = fuse_file_get(wpa->ia.ff);
		tree_insert(&fi->writepages, next);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fm, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);
	fuse_writepage_free(wpa);
}

static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff;

	spin_lock(&fi->lock);
	ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
				      write_entry);
	if (ff)
		fuse_file_get(ff);
	spin_unlock(&fi->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fi);

	WARN_ON(!ff);
	return ff;
}

int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	/*
	 * Inode is always written before the last reference is dropped and
	 * hence this should not be reached from reclaim.
	 *
	 * Writing back the inode from reclaim can deadlock if the request
	 * processing itself needs an allocation.  Allocations triggering
	 * reclaim while serving a request can't be prevented, because it can
	 * involve any number of unrelated userspace processes.
	 */
	WARN_ON(wbc->for_reclaim);

	ff = __fuse_write_file_get(fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, false, false);

	return err;
}

static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
{
	struct fuse_writepage_args *wpa;
	struct fuse_args_pages *ap;

	wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
	if (wpa) {
		ap = &wpa->ia.ap;
		ap->num_pages = 0;
		ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
		if (!ap->pages) {
			kfree(wpa);
			wpa = NULL;
		}
	}
	return wpa;
}

static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
					 struct fuse_writepage_args *wpa)
{
	if (!fc->sync_fs)
		return;

	rcu_read_lock();
	/* Prevent resurrection of dead bucket in unlikely race with syncfs */
	do {
		wpa->bucket = rcu_dereference(fc->curr_bucket);
	} while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
	rcu_read_unlock();
}

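/*
 * Write back a single locked page.  The data is copied into a freshly
 * allocated temporary page, so the original page-cache page can be
 * released immediately: end_page_writeback() is called as soon as the
 * request is queued, and completion is instead tracked through
 * fi->writepages and fi->page_waitq (see fuse_wait_on_page_writeback()).
 */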
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(page->mapping->host);
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight.
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return AOP_WRITEPAGE_ACTIVATE;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

struct fuse_fill_wb_data {
	struct fuse_writepage_args *wpa;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
	unsigned int max_pages;
};

static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
{
	struct fuse_args_pages *ap = &data->wpa->ia.ap;
	struct fuse_conn *fc = get_fuse_conn(data->inode);
	struct page **pages;
	struct fuse_page_desc *descs;
	unsigned int npages = min_t(unsigned int,
				    max_t(unsigned int, data->max_pages * 2,
					  FUSE_DEFAULT_MAX_PAGES_PER_REQ),
				    fc->max_pages);
	WARN_ON(npages <= data->max_pages);

	pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
	if (!pages)
		return false;

	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
	memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
	kfree(ap->pages);
	ap->pages = pages;
	ap->descs = descs;
	data->max_pages = npages;

	return true;
}
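/*
 * Worked example (editor's illustration, not in the original file):
 * fuse_writepages_fill() starts each request with a single-page array, so
 * with FUSE_DEFAULT_MAX_PAGES_PER_REQ == 32 the growth sequence for
 * data->max_pages is
 *
 *	1 -> max(2, 32) = 32 -> max(64, 32) = 64 -> 128 -> ...
 *
 * always clamped to fc->max_pages.  With the default fc->max_pages of 32 a
 * single realloc therefore already reaches the final size; the WARN_ON
 * above fires only if a caller asks to grow an array that is already at
 * the limit, which fuse_writepage_need_send() is careful never to do.
 */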
static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_writepage_args *wpa = data->wpa;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = wpa->ia.ap.num_pages;
	int i;

	wpa->ia.ff = fuse_file_get(data->ff);
	spin_lock(&fi->lock);
	list_add_tail(&wpa->queue_entry, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fi->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}

/*
 * Check under fi->lock if the page is under writeback, and insert it onto
 * the rb_tree if not.  Otherwise iterate the auxiliary write requests to
 * see if there's one already added for a page at this offset.  If there's
 * none, then insert this new request onto the auxiliary list, otherwise
 * reuse the existing one by swapping the new temp page with the old one.
 */
static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
			       struct page *page)
{
	struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
	struct fuse_writepage_args *tmp;
	struct fuse_writepage_args *old_wpa;
	struct fuse_args_pages *new_ap = &new_wpa->ia.ap;

	WARN_ON(new_ap->num_pages != 0);
	new_ap->num_pages = 1;

	spin_lock(&fi->lock);
	old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
	if (!old_wpa) {
		spin_unlock(&fi->lock);
		return true;
	}

	for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
		pgoff_t curr_index;

		WARN_ON(tmp->inode != new_wpa->inode);
		curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
		if (curr_index == page->index) {
			WARN_ON(tmp->ia.ap.num_pages != 1);
			swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
			break;
		}
	}

	if (!tmp) {
		new_wpa->next = old_wpa->next;
		old_wpa->next = new_wpa;
	}

	spin_unlock(&fi->lock);

	if (tmp) {
		struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(new_wpa);
	}

	return false;
}

static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
				     struct fuse_args_pages *ap,
				     struct fuse_fill_wb_data *data)
{
	WARN_ON(!ap->num_pages);

	/*
	 * Being under writeback is unlikely but possible.  For example, a
	 * direct read to an mmaped fuse file will set the page dirty twice:
	 * once when the pages are faulted with get_user_pages(), and then
	 * again after the read completes.
	 */
	if (fuse_page_is_writeback(data->inode, page->index))
		return true;

	/* Reached max pages */
	if (ap->num_pages == fc->max_pages)
		return true;

	/* Reached max write bytes */
	if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
		return true;

	/* Discontinuity */
	if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
		return true;

	/* Need to grow the pages array?  If so, did the expansion fail? */
	if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
		return true;

	return false;
}
static int fuse_writepages_fill(struct folio *folio,
				struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_writepage_args *wpa = data->wpa;
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = data->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fi);
		if (!data->ff)
			goto out_unlock;
	}

	if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
		fuse_writepages_send(data);
		data->wpa = NULL;
	}

	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment ap->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->wpa == NULL) {
		err = -ENOMEM;
		wpa = fuse_writepage_args_alloc();
		if (!wpa) {
			__free_page(tmp_page);
			goto out_unlock;
		}
		fuse_writepage_add_to_bucket(fc, wpa);

		data->max_pages = 1;

		ap = &wpa->ia.ap;
		fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
		wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
		wpa->next = NULL;
		ap->args.in_pages = true;
		ap->args.end = fuse_writepage_end;
		ap->num_pages = 0;
		wpa->inode = inode;
	}
	folio_start_writeback(folio);

	copy_highpage(tmp_page, &folio->page);
	ap->pages[ap->num_pages] = tmp_page;
	ap->descs[ap->num_pages].offset = 0;
	ap->descs[ap->num_pages].length = PAGE_SIZE;
	data->orig_pages[ap->num_pages] = &folio->page;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (data->wpa) {
		/*
		 * Protected by fi->lock against concurrent access by
		 * fuse_page_is_writeback().
		 */
		spin_lock(&fi->lock);
		ap->num_pages++;
		spin_unlock(&fi->lock);
	} else if (fuse_writepage_add(wpa, &folio->page)) {
		data->wpa = wpa;
	} else {
		folio_end_writeback(folio);
	}
out_unlock:
	folio_unlock(folio);

	return err;
}

static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fc->num_background >= fc->congestion_threshold)
		return 0;

	data.inode = inode;
	data.wpa = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(fc->max_pages,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.wpa) {
		WARN_ON(!data.wpa->ia.ap.num_pages);
		fuse_writepages_send(&data);
	}
	if (data.ff)
		fuse_file_put(data.ff, false, false);

	kfree(data.orig_pages);
out:
	return err;
}
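/*
 * Editor's note (not in the original file): write_cache_pages() drives
 * fuse_writepages_fill() once per dirty folio, so a single ->writepages()
 * call batches runs of contiguous pages into one fuse_writepage_args and
 * flushes a batch whenever fuse_writepage_need_send() sees a stop
 * condition (discontinuity, fc->max_pages, fc->max_write, or a page still
 * under writeback).  Any partially filled batch left in data.wpa is sent
 * at the end.
 */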
/*
 * It would be worthwhile to make sure that space is reserved on disk for
 * the write, but how to implement that without killing performance needs
 * more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;

		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	/* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
	if (!copied)
		goto unlock;

	pos += copied;
	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = pos & ~PAGE_MASK;

		if (endoff)
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);
	}

	if (pos > inode->i_size)
		i_size_write(inode, pos);

	set_page_dirty(page);

unlock:
	unlock_page(page);
	put_page(page);

	return copied;
}
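/*
 * Worked example (editor's illustration, not in the original file), with
 * PAGE_SIZE == 4096: a write of len == 50 at pos == 8292 lands in page
 * index 2 at in-page offset 100.  If i_size is 8000, the whole page lies
 * beyond EOF (8000 <= (8292 & PAGE_MASK) == 8192), so fuse_write_begin()
 * skips the read and just zeroes the head [0, 100).  If all 50 bytes are
 * then copied, fuse_write_end() zeroes the tail [8342 & ~PAGE_MASK, 4096)
 * = [150, 4096) before marking the page uptodate and dirty.
 */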
static int fuse_launder_folio(struct folio *folio)
{
	int err = 0;

	if (folio_clear_dirty_for_io(folio)) {
		struct inode *inode = folio->mapping->host;

		/* Serialize with pending writeback for the same page */
		fuse_wait_on_page_writeback(inode, folio->index);
		err = fuse_writepage_locked(&folio->page);
		if (!err)
			fuse_wait_on_page_writeback(inode, folio->index);
	}
	return err;
}

/*
 * Write back dirty data/metadata now (there may not be any suitable
 * open files later for data)
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	int err;

	err = write_inode_now(vma->vm_file->f_mapping->host, 1);
	mapping_set_error(vma->vm_file->f_mapping, err);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;

	/* DAX mmap is superior to direct_io mmap */
	if (FUSE_IS_DAX(file_inode(file)))
		return fuse_dax_mmap(file, vma);

	if (ff->open_flags & FOPEN_DIRECT_IO) {
		/*
		 * Can't provide the coherency needed for MAP_SHARED
		 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
		 */
		if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
			return -ENODEV;

		invalidate_inode_pages2(file->f_mapping);

		if (!(vma->vm_flags & VM_MAYSHARE)) {
			/* MAP_PRIVATE */
			return generic_file_mmap(file, vma);
		}
	}

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
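/*
 * Editor's summary of the mmap decision tree above (not in the original
 * file):
 *
 *	DAX inode				-> fuse_dax_mmap()
 *	FOPEN_DIRECT_IO, MAP_SHARED,
 *	  no FUSE_DIRECT_IO_ALLOW_MMAP		-> -ENODEV
 *	FOPEN_DIRECT_IO, MAP_PRIVATE		-> generic_file_mmap()
 *	everything else				-> page cache mmap with
 *						   fuse_file_vm_ops
 *
 * A shared writable mapping is additionally linked onto the inode's
 * write_files list so that writeback can always find an open file to
 * write through.
 */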
static int convert_fuse_file_lock(struct fuse_conn *fc,
				  const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;

		/*
		 * Convert pid into init's pid namespace.  The locks API will
		 * translate it into the caller's pid namespace.
		 */
		rcu_read_lock();
		fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns),
				       &init_pid_ns);
		rcu_read_unlock();
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->opcode = opcode;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg);
	args->in_args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);

	return err;
}
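/*
 * For context, a minimal sketch (editor's illustration, assuming the
 * libfuse low-level API; not part of this file) of the userspace side
 * that answers the FUSE_GETLK request built above:
 *
 *	static void my_getlk(fuse_req_t req, fuse_ino_t ino,
 *			     struct fuse_file_info *fi, struct flock *lock)
 *	{
 *		// Look up a conflicting lock; if none, report F_UNLCK.
 *		lock->l_type = F_UNLCK;
 *		fuse_reply_lock(req, lock);
 *	}
 *
 * Whatever the daemon replies comes back as outarg.lk and is converted
 * by convert_fuse_file_lock() above.
 */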
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
	pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
		return 0;

	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
	err = fuse_simple_request(fm, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
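/*
 * Editor's note (not in the original file): when the server never
 * implemented FUSE_GETLK/FUSE_SETLK (fc->no_lock) or FUSE_FLOCK support
 * (fc->no_flock), locking degrades gracefully to purely local semantics:
 * posix_test_lock()/posix_lock_file() and locks_lock_file_wait() resolve
 * the locks in the kernel alone, so cooperating processes on this mount
 * still exclude each other even though the filesystem daemon never sees
 * any lock traffic.
 */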
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.opcode = FUSE_BMAP;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS)
		fm->fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fm->fc->no_lseek)
		goto fallback;

	args.opcode = FUSE_LSEEK;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err) {
		if (err == -ENOSYS) {
			fm->fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, file, STATX_SIZE);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, file, STATX_SIZE);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
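/*
 * Editor's note (not in the original file): this returns the slot where
 * kh lives, or the empty slot where it would be inserted.  The two
 * callers use it both ways: fuse_notify_poll_wakeup() only checks
 * whether *link is non-NULL (lookup), while fuse_register_polled_file()
 * passes parent_out so that a miss can be turned directly into
 * rb_link_node() + rb_insert_color() (insertion).
 */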
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fm->fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = mangle_poll(poll_requested_events(wait));

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fm->fc, ff);
	}

	args.opcode = FUSE_POLL;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);

	if (!err)
		return demangle_poll(outarg.revents);
	if (err == -ENOSYS) {
		fm->fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return EPOLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
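/*
 * A minimal sketch of the userspace counterpart (editor's illustration,
 * assuming the libfuse low-level API; not part of this file).  The daemon
 * answers FUSE_POLL with the currently ready events and, if
 * FUSE_POLL_SCHEDULE_NOTIFY was set, keeps the poll handle so it can
 * later trigger fuse_notify_poll_wakeup() below:
 *
 *	static void my_poll(fuse_req_t req, fuse_ino_t ino,
 *			    struct fuse_file_info *fi,
 *			    struct fuse_pollhandle *ph)
 *	{
 *		if (ph)
 *			remember_handle(ino, ph);	// hypothetical helper
 *		fuse_reply_poll(req, my_ready_events(ino));
 *	}
 *
 *	// later, when the file becomes ready:
 *	fuse_lowlevel_notify_poll(ph);	// arrives as FUSE_NOTIFY_POLL
 */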
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(file_dentry(file), &attr, file);
}

static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
	return round_up(off, fc->max_pages << PAGE_SHIFT);
}
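/*
 * Worked example (editor's illustration, not in the original file): with
 * 4 KiB pages and fc->max_pages == 256, the rounding unit is
 * 256 << 12 = 1 MiB, so fuse_round_up(fc, 300000) == 1048576.  The short
 * read optimization in fuse_direct_IO() below uses this so that a
 * truncated async read still issues whole max-sized requests.
 */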
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter), shortened = 0;
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
		return 0;

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = ff->fm->fc->async_dio;
	io->iocb = iocb;
	io->blocking = is_sync_kiocb(iocb);

	/* optimization for short read */
	if (io->async && !io->write && offset + count > i_size) {
		iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
		shortened = count - iov_iter_count(iter);
		count -= shortened;
	}

	/*
	 * We cannot asynchronously extend the size of a file.
	 * In such a case the AIO will behave exactly like sync I/O.
	 */
	if ((offset + count > i_size) && io->write)
		io->blocking = true;

	if (io->async && io->blocking) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}
	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);

	if (io->async) {
		bool blocking = io->blocking;

		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!blocking)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		fuse_write_update_attr(inode, pos, ret);
		/* For extending writes we already hold exclusive lock */
		if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}

static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);

	if (!err)
		fuse_sync_writes(inode);

	return err;
}

static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = ff->fm;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool block_faults = FUSE_IS_DAX(inode) &&
		(!(mode & FALLOC_FL_KEEP_SIZE) ||
		 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	if (fm->fc->no_fallocate)
		return -EOPNOTSUPP;

	inode_lock(inode);
	if (block_faults) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
		loff_t endbyte = offset + length - 1;

		err = fuse_writeback_range(inode, offset, endbyte);
		if (err)
			goto out;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + length > i_size_read(inode)) {
		err = inode_newsize_ok(inode, offset + length);
		if (err)
			goto out;
	}

	err = file_modified(file);
	if (err)
		goto out;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.opcode = FUSE_FALLOCATE;
	args.nodeid = ff->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (fuse_write_update_attr(inode, offset + length, length))
			file_update_time(file);
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (block_faults)
		filemap_invalidate_unlock(inode->i_mapping);

	inode_unlock(inode);

	fuse_flush_time_update(inode);

	return err;
}
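/*
 * Editor's note (not in the original file): the -ENOSYS handling above
 * follows the convention used throughout this file (see fuse_bmap(),
 * fuse_lseek(), fuse_file_poll()): the first time the daemon answers an
 * opcode with ENOSYS, a no_* flag is latched on the connection and the
 * kernel never sends that opcode again, either failing fast (here with
 * -EOPNOTSUPP) or falling back to a generic implementation.
 */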
static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t len, unsigned int flags)
{
	struct fuse_file *ff_in = file_in->private_data;
	struct fuse_file *ff_out = file_out->private_data;
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct fuse_inode *fi_out = get_fuse_inode(inode_out);
	struct fuse_mount *fm = ff_in->fm;
	struct fuse_conn *fc = fm->fc;
	FUSE_ARGS(args);
	struct fuse_copy_file_range_in inarg = {
		.fh_in = ff_in->fh,
		.off_in = pos_in,
		.nodeid_out = ff_out->nodeid,
		.fh_out = ff_out->fh,
		.off_out = pos_out,
		.len = len,
		.flags = flags
	};
	struct fuse_write_out outarg;
	ssize_t err;
	/* mark unstable when write-back is not used, and file_out gets
	 * extended */
	bool is_unstable = (!fc->writeback_cache) &&
			   ((pos_out + len) > inode_out->i_size);

	if (fc->no_copy_file_range)
		return -EOPNOTSUPP;

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	inode_lock(inode_in);
	err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
	inode_unlock(inode_in);
	if (err)
		return err;

	inode_lock(inode_out);

	err = file_modified(file_out);
	if (err)
		goto out;

	/*
	 * Write out dirty pages in the destination file before sending the
	 * COPY request to userspace.  After the request is completed,
	 * truncate off pages (including partial ones) from the cache that
	 * have been copied, since these contain stale data at that point.
	 *
	 * This should be mostly correct, but if the COPY writes to partial
	 * pages (at the start or end) and the parts not covered by the COPY
	 * are written through a memory map after calling
	 * fuse_writeback_range(), then these partial page modifications will
	 * be lost on truncation.
	 *
	 * It is unlikely that someone would rely on such mixed style
	 * modifications.  Yet this does give fewer guarantees than if the
	 * copying was performed with write(2).
	 *
	 * To fix this, a mapping->invalidate_lock could be used to prevent
	 * new faults while the copy is ongoing.
	 */
	err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
	if (err)
		goto out;

	if (is_unstable)
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	args.opcode = FUSE_COPY_FILE_RANGE;
	args.nodeid = ff_in->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fc->no_copy_file_range = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	truncate_inode_pages_range(inode_out->i_mapping,
				   ALIGN_DOWN(pos_out, PAGE_SIZE),
				   ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);

	file_update_time(file_out);
	fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);

	err = outarg.size;
out:
	if (is_unstable)
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);

	inode_unlock(inode_out);
	file_accessed(file_in);

	fuse_flush_time_update(inode_out);

	return err;
}

static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.get_unmapped_area = thp_get_unmapped_area,
	.flock		= fuse_file_flock,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	.copy_file_range = fuse_copy_file_range,
};

static const struct address_space_operations fuse_file_aops = {
	.read_folio	= fuse_read_folio,
	.readahead	= fuse_readahead,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_folio	= fuse_launder_folio,
	.dirty_folio	= filemap_dirty_folio,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};

void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;

	INIT_LIST_HEAD(&fi->write_files);
	INIT_LIST_HEAD(&fi->queued_writes);
	fi->writectr = 0;
	init_waitqueue_head(&fi->page_waitq);
	fi->writepages = RB_ROOT;

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_inode_init(inode, flags);
}