/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
			  struct fuse_open_out *outargp)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			kfree(ff);
			return NULL;
		} else {
			INIT_LIST_HEAD(&ff->write_entry);
			atomic_set(&ff->count, 0);
			spin_lock(&fc->lock);
			ff->kh = ++fc->khctr;
			spin_unlock(&fc->lock);
		}
		RB_CLEAR_NODE(&ff->polled_node);
		init_waitqueue_head(&ff->poll_wait);
	}
	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	dput(req->misc.release.dentry);
	mntput(req->misc.release.vfsmount);
}

static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;
		struct inode *inode = req->misc.release.dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);

		req->end = fuse_release_end;
		fuse_request_send_background(fc, req);
		kfree(ff);
	}
}

void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (outarg->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	ff->fh = outarg->fh;
	file->private_data = fuse_file_get(ff);
}
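/*
 * The FOPEN_* bits in fuse_open_out let the userspace server steer
 * fuse_finish_open(): FOPEN_DIRECT_IO switches the file to the
 * page-cache-bypassing fuse_direct_io_file_operations, FOPEN_KEEP_CACHE
 * suppresses the invalidate_inode_pages2() call, and FOPEN_NONSEEKABLE
 * marks the file non-seekable.
 */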
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(inode, file, isdir, &outarg);
	if (err)
		fuse_file_free(ff);
	else {
		if (isdir)
			outarg.open_flags &= ~FOPEN_DIRECT_IO;
		fuse_finish_open(inode, file, ff, &outarg);
	}

	return err;
}

void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req = ff->reserved_req;

		fuse_release_fill(ff, get_node_id(inode), file->f_flags,
				  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->misc.release.vfsmount = mntget(file->f_path.mnt);
		req->misc.release.dentry = dget(file->f_path.dentry);

		spin_lock(&fc->lock);
		list_del(&ff->write_entry);
		if (!RB_EMPTY_NODE(&ff->polled_node))
			rb_erase(&ff->polled_node, &fc->polled_files);
		spin_unlock(&fc->lock);

		wake_up_interruptible_sync(&ff->poll_wait);
		/*
		 * Normally this will send the RELEASE request,
		 * however if some asynchronous READ or WRITE requests
		 * are outstanding, the sending will be delayed
		 */
		fuse_file_put(ff);
	}

	/* Return value is ignored by VFS */
	return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
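/*
 * This is 32 rounds of plain XTEA with the standard 0x9E3779B9 delta,
 * keyed by the per-connection scramble_key.  The scrambled id is only
 * ever compared for equality, so no inverse transform is needed.
 */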
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
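/*
 * Note: -ENOSYS from FUSE_FLUSH is cached in fc->no_flush above, so later
 * flushes succeed locally without a round trip to userspace.  The same
 * convention is used below for fsync, bmap and poll.
 */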
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count,
			     fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}
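/*
 * Note on the attr_ver check in fuse_read_update_size(): the size is only
 * shrunk if no attribute update (which bumps fi->attr_version) raced with
 * the read, so a stale short read cannot clobber a newer size already
 * learned from the server.
 */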
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct inode *inode = req->pages[0]->mapping->host;

	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (!req->out.h.error && num_read < count) {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
	}

	fuse_invalidate_attr(inode); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		struct fuse_file *ff = file->private_data;
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};
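/*
 * Readahead pages are batched into a single FUSE_READ request as long as
 * they are contiguous and fit within FUSE_MAX_PAGES_PER_REQ and
 * fc->max_read; a gap or a full request flushes the batch and starts a
 * new one.
 */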
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file, inode);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct file *file,
			    struct fuse_file *ff, struct inode *inode,
			    loff_t pos, size_t count, int writepage)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	memset(inarg, 0, sizeof(struct fuse_write_in));
	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
	inarg->flags = file ? file->f_flags : 0;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 2;
	if (fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}
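/*
 * Protocol minor versions before 9 used a shorter struct fuse_write_in
 * without the lock_owner and flags fields, which is why fuse_write_fill()
 * sends only FUSE_COMPAT_WRITE_IN_SIZE bytes to such servers.
 */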
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
	if (owner != NULL) {
		struct fuse_write_in *inarg = &req->misc.write.in;

		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}

static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, inode, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
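/*
 * The copy above runs with page faults disabled, so
 * iov_iter_copy_from_user_atomic() may legitimately copy nothing if the
 * source page is not resident; the "again" path faults it in via
 * iov_iter_fault_in_readable() and retries, just as generic_perform_write()
 * does.
 */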
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
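/*
 * Direct I/O path: instead of going through the page cache, the user
 * buffer's pages are pinned with get_user_pages() and attached to the
 * request; fuse_release_user_pages() unpins them afterwards, dirtying
 * them when they were the target of a read.
 */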
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, !write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}

static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos, int write)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, inode, pos, nbytes,
					       current->files);
		else
			nres = fuse_send_read(req, file, inode, pos, nbytes,
					      current->files);
		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	fuse_put_request(fc, req);
	if (res > 0) {
		if (write)
			fuse_write_update_size(inode, pos);
		*ppos = pos;
	}
	fuse_invalidate_attr(inode);

	return res;
}
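/*
 * A transfer shorter than requested (nres != nbytes) ends the loop in
 * fuse_direct_io(): it means EOF on a read or a partial write, and the
 * number of bytes moved so far is returned.
 */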
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}
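/*
 * The request size is recomputed at send time because a queued writepage
 * can race with truncate: the write is cropped to the current i_size, or
 * dropped entirely if the page has been truncated away.
 */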
/*
 * If fi->writectr is non-negative (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);

	copy_highpage(tmp_page, page);
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}
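/*
 * Because the data was copied to tmp_page, writeback on the original page
 * can be ended immediately; the request owns the temporary page, which is
 * accounted as NR_WRITEBACK_TEMP, so a slow userspace filesystem cannot
 * keep page-cache pages stuck in writeback.
 */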
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * The file may be written through mmap, so chain it onto
		 * the inode's write_files list.
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
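/*
 * FUSE_BMAP only makes sense for block-device based ("fuseblk") mounts,
 * hence the s_bdev check; 0 is returned both on error and when the server
 * does not implement BMAP.
 */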
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
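/*
 * Note: SEEK_END above refreshes the inode attributes first, so that
 * i_size reflects the server's current file size before it is used.
 */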
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr, *map;

		kaddr = map = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left)) {
				/* don't leak the kmap on the fault path */
				kunmap(map);
				return -EFAULT;
			}

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(map);
	}

	return 0;
}

/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct page *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

	/* assume all the iovs returned by the client always fit in a page */
	BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = -ENOMEM;
	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
	iov_page = alloc_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = page_address(iov_page);

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		char *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure the counts stay in bounds; the checks are
		 * separate to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		err = -EIO;
		if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
			goto out;

		/* okay, copy in iovs and retry */
		vaddr = kmap_atomic(pages[0], KM_USER0);
		memcpy(page_address(iov_page), vaddr, transferred);
		kunmap_atomic(vaddr, KM_USER0);

		in_iov = page_address(iov_page);
		out_iov = in_iov + in_iovs;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
out:
	if (req)
		fuse_put_request(fc, req);
	if (iov_page)
		__free_page(iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_file_do_ioctl(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
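/*
 * fuse_find_polled_node() returns the link slot for @kh: if *link is
 * non-NULL the file is already in the tree, otherwise the caller can
 * rb_link_node() a new node at that slot using @parent_out.
 */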
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

static unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}