#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
	req->r_args.open.preferred = cpu_to_le32(-1);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
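
/*
 * A note on reference counting (a summary of the above, nothing new):
 * every successful ceph_init_file() leaves exactly one fmode reference
 * held on the ceph_inode (taken by the caller or carried in
 * req->r_fmode).  For regular files and directories that reference is
 * dropped in ceph_release(); for symlinks and special files it is
 * dropped immediately in ceph_init_file() itself, because their
 * release fop is not ours.
 */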

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&inode->i_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&inode->i_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&inode->i_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
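
/*
 * Fast-path illustration for the checks above (hypothetical cap
 * states): any "real" cap on the inode -- plus an auth-MDS cap if we
 * are opening for write -- lets us take the fmode ref locally with no
 * round trip.  If neither the issued set nor what we already told the
 * MDS we want covers ceph_caps_for_mode(fmode) (e.g. Fc "cache" for a
 * read), ceph_check_caps() sends an async wanted-set update; we never
 * wait for its reply here.
 */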

/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct file *file = nd->intent.open.file;
	struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
	struct ceph_mds_request *req;
	int err;
	int flags = nd->intent.open.flags - 1;  /* silly vfs! */

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (!err)
		err = ceph_init_file(req->r_dentry->d_inode, file,
				     req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", dentry);
	return dentry;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
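
/*
 * The fields freed above are (as used elsewhere in fs/ceph) readdir
 * state: last_readdir, last_name, dir_info and dentry are only
 * populated for directory handles, so for a plain regular-file open
 * they are NULL and the kfree()/dput() calls are harmless no-ops.
 */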

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int io_align, page_align;
	int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages =
			((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			ceph_zero_page_vector_range(page_off + read,
						    pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = 1;

		/* zero trailing bytes (inside i_size) */
		if (left > 0 && pos < inode->i_size) {
			if (pos + left > inode->i_size)
				left = inode->i_size - pos;

			dout("zero tail %d\n", left);
			ceph_zero_page_vector_range(page_off + read, left,
						    pages);
			read += left;
		}
	}

	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}
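
/*
 * Worked example for the loop above (illustrative numbers, assuming
 * the default 4 MB object size): a 1 MB read at offset 3.5 MB crosses
 * an object boundary, so ceph_osdc_readpages() trims this_len from
 * 1 MB down to 512 KB (hit_stripe).  If the first object returns all
 * 512 KB we jump back to "more:" for the remaining 512 KB in the next
 * object; a short return instead means a hole or EOF, which the
 * zeroing code above sorts out against i_size.
 */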

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages, ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		num_pages = calc_pages_for((unsigned long)data, len);
		pages = ceph_get_direct_page_vector(data, num_pages, true);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof,
			   file->f_flags & O_DIRECT,
			   (unsigned long)data & ~PAGE_MASK);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, true);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}
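
/*
 * In other words (a summary, not a new mechanism): a write that only
 * waited for the OSD's ACK is still "unsafe" until the ONDISK commit
 * arrives.  ceph_sync_write() below parks such requests on
 * ci->i_unsafe_writes while holding an extra CEPH_CAP_FILE_WR
 * reference, so the Fw cap cannot be released back to the MDS while
 * uncommitted data is in flight; the callback above undoes both.
 */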

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	long long unsigned pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int page_align, io_align;
	unsigned long buf_align;
	int ret;
	struct timespec mtime = CURRENT_TIME;

	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;

	io_align = pos & ~PAGE_MASK;
	buf_align = (unsigned long)data & ~PAGE_MASK;

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;

	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	len = left;
	if (file->f_flags & O_DIRECT) {
		/* write from beginning of first page,
		   regardless of io alignment */
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
		num_pages = calc_pages_for((unsigned long)data, len);
	} else {
		page_align = pos & ~PAGE_MASK;
		num_pages = calc_pages_for(pos, len);
	}
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2, page_align);
	if (!req)
		return -ENOMEM;

	if (file->f_flags & O_DIRECT) {
		pages = ceph_get_direct_page_vector(data, num_pages, false);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_item,
				      &ci->i_unsafe_writes);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}

		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret < 0 && req->r_safe_callback) {
			spin_lock(&ci->i_unsafe_lock);
			list_del_init(&req->r_unsafe_item);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
	}

	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, false);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		data += len;	/* advance past the chunk we just wrote;
				   without this, a write spanning objects
				   would resend the same user data */
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}
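
/*
 * A worked example of the O_DIRECT alignment math above (illustrative
 * numbers): io_align is the starting file offset within a page and
 * buf_align the user buffer's offset within a page.  With pos 0x1200
 * (io_align 0x200) and a buffer at ...300 (buf_align 0x300), the first
 * chunk gets page_align = (0x1200 - 0x200 + 0x300) & ~PAGE_MASK = 0x300:
 * data always starts at the buffer's own in-page offset, and later
 * chunks pick up whatever shift pos has accumulated across objects.
 */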

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void __user *base = iov->iov_base;
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("sync_read hit hole, ppos %lld < size %lld, reading more\n",
			     *ppos, inode->i_size);
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}
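
/*
 * A walk-through of the retry above (illustrative): striped_read()
 * sets checkeof when a short OSD read lands below our locally cached
 * i_size.  We then refetch the size from the MDS; if the file really
 * does extend past what we read (another client grew it, or we read
 * across a hole), we advance base/len and loop back to "again:" so the
 * caller still sees as full a read as the file allows.
 */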

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	loff_t endoff = pos + iov->iov_len;
	int want, got = 0;
	int ret, err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
				      &iocb->ki_pos);
	} else {
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}
	}
	if (ret >= 0) {
		int dirty;
		spin_lock(&inode->i_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

out:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	switch (origin) {
	case SEEK_END:
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	}

	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
};
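
/*
 * Note on the table above: .read/.write point at the generic
 * do_sync_read()/do_sync_write() helpers, which build a kiocb and call
 * our .aio_read/.aio_write entries, so plain read(2)/write(2) traffic
 * also funnels through the cap-checking wrappers.
 */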