#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/aio.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
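/*
 * Illustrative sketch (hypothetical caller, compiled out): the fmode
 * reference contract of ceph_init_file() above.  The caller takes the
 * open reference and hands it to ceph_init_file(), which either stores
 * it in the ceph_file_info (to be dropped later in ceph_release()) or
 * drops it itself on the failure/symlink/special paths, so the caller
 * never calls ceph_put_fmode() afterwards.
 */
#if 0
static int example_local_open(struct inode *inode, struct file *file,
			      int fmode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	spin_lock(&ci->i_ceph_lock);
	__ceph_get_fmode(ci, fmode);	/* take the open reference */
	spin_unlock(&ci->i_ceph_lock);

	/* on any failure this drops the reference for us */
	return ceph_init_file(inode, file, fmode);
}
#endif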
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = NULL;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	if (flags & (O_CREAT|O_TRUNC))
		parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
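/*
 * The open paths above, summarized (descriptive comment only):
 *
 *  1. file already has private_data        -> nothing to do
 *  2. snapdir                              -> take fmode ref locally
 *  3. real caps present, and the open is
 *     read-only or we hold the auth cap    -> satisfy locally; poke the
 *                                             MDS asynchronously via
 *                                             ceph_check_caps() if the
 *                                             wanted set grew
 *  4. snap inode whose snap caps already
 *     cover 'wanted'                       -> satisfy locally
 *  5. otherwise                            -> synchronous MDS open
 *                                             request
 */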
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	int err;

	dout("atomic_open %p dentry %p '%.*s' %s flags %d mode 0%o\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return err;

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;  /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	if (err)
		goto out_err;

	err = ceph_handle_snapdir(req, dentry, err);
	if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_unhashed(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_err;
	if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino)
			*opened |= FILE_CREATED;
		err = finish_open(file, dentry, ceph_open, opened);
	}

out_err:
	ceph_mdsc_put_request(req);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
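/*
 * Worked example for the striping loop below, assuming a hypothetical
 * layout with 4 MB objects and simple striping: a 6 MB read at file
 * offset 2 MB first asks for 2M~6M, but the layout clips this_len to
 * 2 MB (the remainder of the first object), so hit_stripe is true and
 * we loop, issuing 4M~4M against the next object.  A short OSD reply
 * that still lies inside i_size is a hole and gets zero-filled; a
 * short reply on a range reaching past i_size means EOF and sets
 * *checkeof.
 */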
/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int io_align, page_align;
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			ceph_zero_page_vector_range(page_align + read,
						    pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = 1;

		/* zero trailing bytes (inside i_size) */
		if (left > 0 && pos < inode->i_size) {
			if (pos + left > inode->i_size)
				left = inode->i_size - pos;

			dout("zero tail %d\n", left);
			ceph_zero_page_vector_range(page_align + read, left,
						    pages);
			read += left;
		}
	}

	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}
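/*
 * Alignment note for striped_read() above (example numbers only,
 * assuming PAGE_SIZE == 4096): io_align is the file offset within its
 * page and buf_align is the user buffer's offset within its page.  In
 * the buffered case the destination pages mirror the file offset, so
 * page_align is just pos & ~PAGE_MASK.  For O_DIRECT the data must
 * land at the *buffer's* alignment instead: with off = 512,
 * buf_align = 1536 and n bytes of progress (pos = off + n), the
 * expression (pos - io_align + buf_align) & ~PAGE_MASK evaluates to
 * (n + 1536) & 4095.
 */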
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = *poff;
	int num_pages, ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		num_pages = calc_pages_for((unsigned long)data, len);
		pages = ceph_get_direct_page_vector(data, num_pages, true);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof,
			   file->f_flags & O_DIRECT,
			   (unsigned long)data & ~PAGE_MASK);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, true);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}
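/*
 * Lifecycle of the callback above, sketched (descriptive comment only):
 *
 *   request submitted -> unsafe=true:  take a CEPH_CAP_FILE_WR cap
 *       reference and put the request on ci->i_unsafe_writes, so
 *       fsync and cap-management code can find in-flight writes
 *   ONDISK reply (or early completion) -> unsafe=false:  unlink the
 *       request and drop the cap reference
 *
 * Holding the cap reference keeps the FILE_WR cap from being released
 * back to the MDS while an acked-but-not-committed write is in flight.
 */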
/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t pos, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	int num_ops = 1;
	struct page **pages;
	int num_pages;
	u64 len;
	int written = 0;
	int flags;
	int check_caps = 0;
	int page_align, io_align;
	unsigned long buf_align;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	bool own_pages = false;

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, pos,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		num_ops++;	/* Also include a 'startsync' command. */

	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	io_align = pos & ~PAGE_MASK;
	buf_align = (unsigned long)data & ~PAGE_MASK;
	len = left;

	snapc = ci->i_snap_realm->cached_context;
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    vino, pos, &len, num_ops,
				    CEPH_OSD_OP_WRITE, flags, snapc,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* write from beginning of first page, regardless of io alignment */
	page_align = file->f_flags & O_DIRECT ? buf_align : io_align;
	num_pages = calc_pages_for(page_align, len);
	if (file->f_flags & O_DIRECT) {
		pages = ceph_get_direct_page_vector(data, num_pages, false);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_unsafe_callback = ceph_sync_write_unsafe;
			req->r_inode = inode;
			own_pages = true;
		}
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
					 false, own_pages);

	/* BUG_ON(vino.snap != CEPH_NOSNAP); */
	ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages, false);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		data += len;
		if (left)
			goto more;

		ret = written;
		*ppos = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}
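/*
 * OSD flag selection in ceph_sync_write() above, summarized
 * (descriptive comment only):
 *
 *   buffered-copy write:  ORDERSNAP|ONDISK|WRITE|ACK
 *       -> the wait returns on the OSD's ack; the on-disk commit
 *          arrives later through ceph_sync_write_unsafe()
 *   O_SYNC or O_DIRECT:   ORDERSNAP|ONDISK|WRITE, plus a second
 *       'startsync' op -> the wait returns only on the on-disk commit
 */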
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	void __user *base = iov->iov_base;
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	    (fi->flags & CEPH_F_SYNC))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, ppos %lld < size"
			     " %lld, reading more\n", *ppos, inode->i_size);
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}
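/*
 * Notes on ceph_aio_read() above (descriptive comment only): the page
 * cache is bypassed whenever (a) no FILE_CACHE/LAZYIO caps were
 * granted, (b) the file is open O_DIRECT, (c) the mount is
 * MS_SYNCHRONOUS, or (d) CEPH_F_SYNC is set on the file.  The 'again'
 * loop handles a short sync read that hit a hole rather than EOF:
 * getattr refreshes i_size, and if the file extends past where we
 * stopped, the remainder is read with fresh cap references.
 */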
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	ssize_t count, written = 0;
	int err, want, got;
	bool hold_mutex;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	mutex_lock(&inode->i_mutex);
	hold_mutex = true;

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = file->f_mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
	    (fi->flags & CEPH_F_SYNC)) {
		mutex_unlock(&inode->i_mutex);
		written = ceph_sync_write(file, iov->iov_base, count,
					  pos, &iocb->ki_pos);
	} else {
		written = generic_file_buffered_write(iocb, iov, nr_segs,
						      pos, &iocb->ki_pos,
						      count, 0);
		mutex_unlock(&inode->i_mutex);
	}
	hold_mutex = false;

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0 &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
		if (err < 0)
			written = err;
	}

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		mutex_lock(&inode->i_mutex);
		hold_mutex = true;
		goto retry_snap;
	}
out:
	if (hold_mutex)
		mutex_unlock(&inode->i_mutex);
	current->backing_dev_info = NULL;

	return written ? written : err;
}
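/*
 * EOLDSNAPC handling in ceph_aio_write() above (descriptive comment
 * only): a sync write issued against a stale snap context is refused
 * by the OSD rather than misordering snapshot data.  By the time the
 * error is seen, the cap references have already been dropped, which
 * lets the pending snap flush complete; retaking i_mutex and jumping
 * to retry_snap then picks up the new cached_context on the next pass
 * through ceph_sync_write().
 */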
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	switch (whence) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= inode->i_size) {
			offset = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= inode->i_size) {
			offset = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
};
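/*
 * Note on ceph_file_fops above: .read and .write point at the VFS's
 * do_sync_read/do_sync_write helpers, which build a kiocb and call
 * back into .aio_read/.aio_write, so every read(2)/write(2) and aio
 * path funnels through ceph_aio_read()/ceph_aio_write() and gets the
 * same cap handling.
 */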