/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>

static const struct file_operations fuse_direct_io_file_operations;

/*
 * Send a FUSE_OPEN (or FUSE_OPENDIR when isdir) request to userspace and
 * collect the open reply in *outargp.
 *
 * Creation-related flags (O_CREAT, O_EXCL, O_NOCTTY, O_TRUNC) are masked
 * out of the flags passed to the filesystem, since those operations are
 * handled separately by the VFS before ->open() is reached.
 *
 * Returns 0 on success or a negative error from the userspace reply /
 * request allocation.
 */
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
			  struct fuse_open_out *outargp)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

/*
 * Allocate a fuse_file together with a preallocated request.
 *
 * The reserved_req is consumed later by fuse_release_fill(), so the
 * release path never has to allocate — presumably so that release cannot
 * fail on memory pressure (the request is already there).
 *
 * Returns the new fuse_file, or NULL on allocation failure.
 */
struct fuse_file *fuse_file_alloc(void)
{
	struct fuse_file *ff;
	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			/* No request available: undo the fuse_file allocation */
			kfree(ff);
			ff = NULL;
		}
	}
	return ff;
}

/* Free a fuse_file and its (still unused) reserved request. */
void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

/*
 * Finish opening a file after a successful FUSE_OPEN reply.
 *
 * Honours the open flags returned by the filesystem:
 *  - FOPEN_DIRECT_IO: bypass the page cache by switching to the
 *    direct-IO file operations.
 *  - FOPEN_KEEP_CACHE not set: drop any cached pages so stale data
 *    is not served from a previous open.
 *
 * Takes ownership of @ff by stashing it in file->private_data.
 */
void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages(inode->i_mapping);
	ff->fh = outarg->fh;
	file->private_data = ff;
}

/*
 * Common ->open() implementation for files (isdir == 0) and
 * directories (isdir != 0).
 */
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	/* If opening the root node, no lookup has been performed on
	   it, so the attributes must be refreshed */
	if (get_node_id(inode) == FUSE_ROOT_ID) {
		err = fuse_do_getattr(inode);
		if (err)
			return err;
	}

	ff = fuse_file_alloc();
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(inode, file, isdir, &outarg);
	if (err)
		fuse_file_free(ff);
	else {
		if (isdir)
			/* Direct IO makes no sense for directories */
			outarg.open_flags &= ~FOPEN_DIRECT_IO;
		fuse_finish_open(inode, file, ff, &outarg);
	}

	return err;
}

/*
 * Fill in a release request using the fuse_file's reserved request.
 *
 * Consumes @ff: its reserved_req is returned (now owned by the caller)
 * and the fuse_file itself is freed here.  @opcode is FUSE_RELEASE or
 * FUSE_RELEASEDIR.
 */
struct fuse_req *fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags,
				   int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release_in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
	kfree(ff);

	return req;
}

/*
 * Common ->release() implementation for files and directories.
 *
 * The release request is sent in the background, so ->release() itself
 * never blocks on the userspace filesystem.
 */
int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req;

		req = fuse_release_fill(ff, get_node_id(inode), file->f_flags,
					isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->vfsmount = mntget(file->f_path.mnt);
		req->dentry = dget(file->f_path.dentry);
		request_send_background(fc, req);
	}

	/* Return value is ignored by VFS */
	return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
static u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	/* 32 rounds of the XTEA block cipher, keyed per-connection */
	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * ->flush() implementation: send a FUSE_FLUSH request so the filesystem
 * can flush any dirty state associated with this open file.
 *
 * Uses a "nofail" request with req->force set — flush must be attempted
 * even under memory pressure or when the request would otherwise be
 * refused.  If the filesystem replies -ENOSYS, flush support is disabled
 * for the whole connection and treated as success from then on.
 */
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		/* Filesystem doesn't implement flush: remember and succeed */
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Common ->fsync() implementation for files (FUSE_FSYNC) and
 * directories (FUSE_FSYNCDIR).
 *
 * fsync_flags bit 0 carries the datasync hint to userspace.  An -ENOSYS
 * reply disables fsync (or fsyncdir) for the connection and is reported
 * as success.
 */
int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}

/*
 * Fill in a read request (FUSE_READ, or another read-like @opcode such
 * as used by the readdir path) for @count bytes at @pos.
 *
 * The reply data is received into req->pages (argpages) and may be
 * shorter than requested (argvar).
 */
void fuse_read_fill(struct fuse_req *req, struct file *file,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_read_in *inarg = &req->misc.read_in;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argpages = 1;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

/*
 * Send a synchronous FUSE_READ and return the number of bytes the
 * filesystem actually produced (the variable-size out-argument).
 * Any error is left in req->out.h.error for the caller to inspect.
 */
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	request_send(fc, req);
	return req->out.args[0].size;
}

/*
 * ->readpage(): synchronously read one page from userspace.
 *
 * page_zeroing makes the request code zero-fill any part of the page
 * the (possibly short) reply didn't cover.  The page is unlocked on all
 * paths, as the VFS requires.
 */
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	req->out.page_zeroing = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		SetPageUptodate(page);
	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

/*
 * Completion handler for a (possibly asynchronous) readpages request:
 * mark each page uptodate or errored, unlock it, and drop the request.
 */
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	fuse_put_request(fc, req);
}

/*
 * Send one batched multi-page read.  If the connection supports
 * asynchronous reads the request goes to the background (pinning the
 * file so the completion handler can run safely); otherwise it is sent
 * synchronously and completed inline.
 */
static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (fc->async_read) {
		get_file(file);
		req->file = file;
		req->end = fuse_readpages_end;
		request_send_background(fc, req);
	} else {
		request_send(fc, req);
		fuse_readpages_end(fc, req);
	}
}

/* Per-call state threaded through read_cache_pages() by fuse_readpages() */
struct fuse_readpages_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

/*
 * read_cache_pages() callback: accumulate contiguous pages into the
 * current request, flushing it (and starting a fresh one) whenever the
 * request is full, would exceed max_read, or the next page is not
 * adjacent to the previous one.
 */
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_readpages_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

/*
 * ->readpages(): batched readahead.  Pages are gathered via
 * fuse_readpages_fill() and any final partial batch is sent (or the
 * unused request dropped) afterwards.
 */
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_readpages_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file, inode);
		else
			fuse_put_request(fc, data.req);
	}
 out:
	return err;
}

/*
 * Send a synchronous FUSE_WRITE of @count bytes at @pos.  The data is
 * taken from req->pages (argpages); the caller must have set num_pages
 * and page_offset.  Returns the byte count the filesystem reports as
 * written; errors are left in req->out.h.error.
 */
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_write_in inarg;
	struct fuse_write_out outarg;

	memset(&inarg, 0, sizeof(struct fuse_write_in));
	inarg.fh = ff->fh;
	inarg.offset = pos;
	inarg.size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.argpages = 1;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	return outarg.size;
}

/* Nothing to prepare: the whole write happens in fuse_commit_write() */
static int fuse_prepare_write(struct file *file, struct page *page,
			      unsigned offset, unsigned to)
{
	/* No op */
	return 0;
}

/*
 * ->commit_write(): write the [offset, to) range of @page synchronously.
 *
 * A short write (nres != count) is reported as -EIO.  On success the
 * inode size is extended under fc->lock if the write went past EOF, and
 * a full-page write leaves the page clean and uptodate.  Attributes are
 * invalidated since mtime/size changed in userspace.
 */
static int fuse_commit_write(struct file *file, struct page *page,
			     unsigned offset, unsigned to)
{
	int err;
	size_t nres;
	unsigned count = to - offset;
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(page) + offset;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && nres != count)
		err = -EIO;
	if (!err) {
		pos += count;
		spin_lock(&fc->lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&fc->lock);

		if (offset == 0 && to == PAGE_CACHE_SIZE) {
			clear_page_dirty(page);
			SetPageUptodate(page);
		}
	}
	fuse_invalidate_attr(inode);
	return err;
}

/*
 * Drop the user pages pinned by fuse_get_user_pages().  If data was
 * written into them (@write), mark them dirty first.
 */
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

/*
 * Pin the user buffer [buf, buf + nbytes) into req->pages for direct IO.
 * @write is from get_user_pages()' point of view: nonzero when the
 * kernel will write into the pages (i.e. a file read).
 *
 * Stores the page count and the offset of @buf within its first page in
 * the request.  Returns 0 or a negative error.
 */
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       unsigned nbytes, int write)
{
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* This doesn't work with nfsd */
	if (!current->mm)
		return -EPERM;

	nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;
	return 0;
}

/*
 * Core of direct (uncached) read/write: loop pinning user pages and
 * sending max_read/max_write-sized FUSE requests until @count is
 * consumed, an error occurs, or the filesystem returns a short transfer.
 *
 * On any positive progress the file position is advanced and, for
 * writes past EOF, the inode size is updated under fc->lock.  Returns
 * bytes transferred, or a negative error if nothing was transferred.
 */
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos, int write)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		size_t nbytes = min(count, nmax);
		/* !write: for a file read the pages are written to */
		int err = fuse_get_user_pages(req, buf, nbytes, !write);
		if (err) {
			res = err;
			break;
		}
		/* Recompute: fewer pages may have been pinned than asked */
		nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
		nbytes = min(count, nbytes);
		if (write)
			nres = fuse_send_write(req, file, inode, pos, nbytes);
		else
			nres = fuse_send_read(req, file, inode, pos, nbytes);
		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		/* Short transfer: EOF (read) or filesystem limit — stop */
		if (nres != nbytes)
			break;
		if (count) {
			/* Fresh request for the next chunk */
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	fuse_put_request(fc, req);
	if (res > 0) {
		if (write) {
			spin_lock(&fc->lock);
			if (pos > inode->i_size)
				i_size_write(inode, pos);
			spin_unlock(&fc->lock);
		}
		*ppos = pos;
	}
	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;
	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}

/*
 * ->mmap(): shared writable mappings are not supported (there is no
 * writeback path here), so MAP_SHARED is restricted to read-only and
 * VM_MAYWRITE is cleared to forbid a later mprotect(PROT_WRITE).
 */
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED)) {
		if ((vma->vm_flags & VM_WRITE))
			return -ENODEV;
		else
			vma->vm_flags &= ~VM_MAYWRITE;
	}
	return generic_file_mmap(file, vma);
}

/*
 * Pages should never be dirtied through this mapping (shared writable
 * mmap is refused above); log loudly if it happens anyway.
 */
static int fuse_set_page_dirty(struct page *page)
{
	printk("fuse_set_page_dirty: should not happen\n");
	dump_stack();
	return 0;
}

/*
 * Translate a fuse_file_lock reply into a VFS file_lock.
 * Out-of-range or inconsistent values from userspace yield -EIO.
 */
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

/*
 * Fill in a lock request (FUSE_GETLK/FUSE_SETLK/FUSE_SETLKW) from a VFS
 * file_lock.  The lock owner is scrambled with fuse_lock_owner_id() so
 * the kernel pointer value never reaches userspace.
 */
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

/* Query a lock via FUSE_GETLK and convert the result back into *fl */
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

/*
 * Set (or clear) a lock via FUSE_SETLK, or FUSE_SETLKW when the caller
 * is willing to sleep.  pid is only meaningful for an actual lock, not
 * an unlock.
 */
static int fuse_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid);
	request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

/*
 * ->lock(): route POSIX lock requests to userspace, falling back to
 * purely local lock handling when the filesystem doesn't support
 * remote locking (fc->no_lock).
 */
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_GETLK) {
		if (fc->no_lock) {
			if (!posix_test_lock(file, fl, fl))
				fl->fl_type = F_UNLCK;
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file_wait(file, fl);
		else
			err = fuse_setlk(file, fl);
	}
	return err;
}

/*
 * ->bmap(): map a file block to a device block via FUSE_BMAP.  Only
 * meaningful for block-device-based filesystems; returns 0 (no mapping)
 * on any failure, and disables bmap for the connection on -ENOSYS.
 */
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

/* Page-cache-backed file operations (the default) */
static const struct file_operations fuse_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.sendfile	= generic_file_sendfile,
};

/* Operations used when the filesystem requested FOPEN_DIRECT_IO */
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	/* no mmap and sendfile */
};

static const struct address_space_operations fuse_file_aops  = {
	.readpage	= fuse_readpage,
	.prepare_write	= fuse_prepare_write,
	.commit_write	= fuse_commit_write,
	.readpages	= fuse_readpages,
	.set_page_dirty	= fuse_set_page_dirty,
	.bmap		= fuse_bmap,
};

/* Install the file and address-space operations on a regular file inode */
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}