1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/module.h> 4 #include <linux/sched.h> 5 #include <linux/slab.h> 6 #include <linux/file.h> 7 #include <linux/mount.h> 8 #include <linux/namei.h> 9 #include <linux/writeback.h> 10 #include <linux/falloc.h> 11 12 #include "super.h" 13 #include "mds_client.h" 14 #include "cache.h" 15 16 /* 17 * Ceph file operations 18 * 19 * Implement basic open/close functionality, and implement 20 * read/write. 21 * 22 * We implement three modes of file I/O: 23 * - buffered uses the generic_file_aio_{read,write} helpers 24 * 25 * - synchronous is used when there is multi-client read/write 26 * sharing, avoids the page cache, and synchronously waits for an 27 * ack from the OSD. 28 * 29 * - direct io takes the variant of the sync path that references 30 * user pages directly. 31 * 32 * fsync() flushes and waits on dirty pages, but just queues metadata 33 * for writeback: since the MDS can recover size and mtime there is no 34 * need to wait for MDS acknowledgement. 35 */ 36 37 /* 38 * Calculate the length sum of direct io vectors that can 39 * be combined into one page vector. 40 */ 41 static size_t dio_get_pagev_size(const struct iov_iter *it) 42 { 43 const struct iovec *iov = it->iov; 44 const struct iovec *iovend = iov + it->nr_segs; 45 size_t size; 46 47 size = iov->iov_len - it->iov_offset; 48 /* 49 * An iov can be page vectored when both the current tail 50 * and the next base are page aligned. 51 */ 52 while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && 53 (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { 54 size += iov->iov_len; 55 } 56 dout("dio_get_pagevlen len = %zu\n", size); 57 return size; 58 } 59 60 /* 61 * Allocate a page vector based on (@it, @nbytes). 62 * The return value is the tuple describing a page vector, 63 * that is (@pages, @page_align, @num_pages). 64 */ 65 static struct page ** 66 dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, 67 size_t *page_align, int *num_pages) 68 { 69 struct iov_iter tmp_it = *it; 70 size_t align; 71 struct page **pages; 72 int ret = 0, idx, npages; 73 74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) & 75 (PAGE_SIZE - 1); 76 npages = calc_pages_for(align, nbytes); 77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); 78 if (!pages) { 79 pages = vmalloc(sizeof(*pages) * npages); 80 if (!pages) 81 return ERR_PTR(-ENOMEM); 82 } 83 84 for (idx = 0; idx < npages; ) { 85 size_t start; 86 ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, 87 npages - idx, &start); 88 if (ret < 0) 89 goto fail; 90 91 iov_iter_advance(&tmp_it, ret); 92 nbytes -= ret; 93 idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; 94 } 95 96 BUG_ON(nbytes != 0); 97 *num_pages = npages; 98 *page_align = align; 99 dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); 100 return pages; 101 fail: 102 ceph_put_page_vector(pages, idx, false); 103 return ERR_PTR(ret); 104 } 105 106 /* 107 * Prepare an open request. Preallocate ceph_cap to avoid an 108 * inopportune ENOMEM later. 109 */ 110 static struct ceph_mds_request * 111 prepare_open_request(struct super_block *sb, int flags, int create_mode) 112 { 113 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 114 struct ceph_mds_client *mdsc = fsc->mdsc; 115 struct ceph_mds_request *req; 116 int want_auth = USE_ANY_MDS; 117 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 118 119 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) 120 want_auth = USE_AUTH_MDS; 121 122 req = ceph_mdsc_create_request(mdsc, op, want_auth); 123 if (IS_ERR(req)) 124 goto out; 125 req->r_fmode = ceph_flags_to_mode(flags); 126 req->r_args.open.flags = cpu_to_le32(flags); 127 req->r_args.open.mode = cpu_to_le32(create_mode); 128 out: 129 return req; 130 } 131 132 /* 133 * initialize private struct file data. 134 * if we fail, clean up by dropping fmode reference on the ceph_inode 135 */ 136 static int ceph_init_file(struct inode *inode, struct file *file, int fmode) 137 { 138 struct ceph_file_info *cf; 139 int ret = 0; 140 141 switch (inode->i_mode & S_IFMT) { 142 case S_IFREG: 143 ceph_fscache_register_inode_cookie(inode); 144 ceph_fscache_file_set_cookie(inode, file); 145 case S_IFDIR: 146 dout("init_file %p %p 0%o (regular)\n", inode, file, 147 inode->i_mode); 148 cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); 149 if (cf == NULL) { 150 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 151 return -ENOMEM; 152 } 153 cf->fmode = fmode; 154 cf->next_offset = 2; 155 cf->readdir_cache_idx = -1; 156 file->private_data = cf; 157 BUG_ON(inode->i_fop->release != ceph_release); 158 break; 159 160 case S_IFLNK: 161 dout("init_file %p %p 0%o (symlink)\n", inode, file, 162 inode->i_mode); 163 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 164 break; 165 166 default: 167 dout("init_file %p %p 0%o (special)\n", inode, file, 168 inode->i_mode); 169 /* 170 * we need to drop the open ref now, since we don't 171 * have .release set to ceph_release. 172 */ 173 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 174 BUG_ON(inode->i_fop->release == ceph_release); 175 176 /* call the proper open fop */ 177 ret = inode->i_fop->open(inode, file); 178 } 179 return ret; 180 } 181 182 /* 183 * try renew caps after session gets killed. 184 */ 185 int ceph_renew_caps(struct inode *inode) 186 { 187 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 188 struct ceph_inode_info *ci = ceph_inode(inode); 189 struct ceph_mds_request *req; 190 int err, flags, wanted; 191 192 spin_lock(&ci->i_ceph_lock); 193 wanted = __ceph_caps_file_wanted(ci); 194 if (__ceph_is_any_real_caps(ci) && 195 (!(wanted & CEPH_CAP_ANY_WR) == 0 || ci->i_auth_cap)) { 196 int issued = __ceph_caps_issued(ci, NULL); 197 spin_unlock(&ci->i_ceph_lock); 198 dout("renew caps %p want %s issued %s updating mds_wanted\n", 199 inode, ceph_cap_string(wanted), ceph_cap_string(issued)); 200 ceph_check_caps(ci, 0, NULL); 201 return 0; 202 } 203 spin_unlock(&ci->i_ceph_lock); 204 205 flags = 0; 206 if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR)) 207 flags = O_RDWR; 208 else if (wanted & CEPH_CAP_FILE_RD) 209 flags = O_RDONLY; 210 else if (wanted & CEPH_CAP_FILE_WR) 211 flags = O_WRONLY; 212 #ifdef O_LAZY 213 if (wanted & CEPH_CAP_FILE_LAZYIO) 214 flags |= O_LAZY; 215 #endif 216 217 req = prepare_open_request(inode->i_sb, flags, 0); 218 if (IS_ERR(req)) { 219 err = PTR_ERR(req); 220 goto out; 221 } 222 223 req->r_inode = inode; 224 ihold(inode); 225 req->r_num_caps = 1; 226 req->r_fmode = -1; 227 228 err = ceph_mdsc_do_request(mdsc, NULL, req); 229 ceph_mdsc_put_request(req); 230 out: 231 dout("renew caps %p open result=%d\n", inode, err); 232 return err < 0 ? err : 0; 233 } 234 235 /* 236 * If we already have the requisite capabilities, we can satisfy 237 * the open request locally (no need to request new caps from the 238 * MDS). We do, however, need to inform the MDS (asynchronously) 239 * if our wanted caps set expands. 240 */ 241 int ceph_open(struct inode *inode, struct file *file) 242 { 243 struct ceph_inode_info *ci = ceph_inode(inode); 244 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 245 struct ceph_mds_client *mdsc = fsc->mdsc; 246 struct ceph_mds_request *req; 247 struct ceph_file_info *cf = file->private_data; 248 int err; 249 int flags, fmode, wanted; 250 251 if (cf) { 252 dout("open file %p is already opened\n", file); 253 return 0; 254 } 255 256 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ 257 flags = file->f_flags & ~(O_CREAT|O_EXCL); 258 if (S_ISDIR(inode->i_mode)) 259 flags = O_DIRECTORY; /* mds likes to know */ 260 261 dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, 262 ceph_vinop(inode), file, flags, file->f_flags); 263 fmode = ceph_flags_to_mode(flags); 264 wanted = ceph_caps_for_mode(fmode); 265 266 /* snapped files are read-only */ 267 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) 268 return -EROFS; 269 270 /* trivially open snapdir */ 271 if (ceph_snap(inode) == CEPH_SNAPDIR) { 272 spin_lock(&ci->i_ceph_lock); 273 __ceph_get_fmode(ci, fmode); 274 spin_unlock(&ci->i_ceph_lock); 275 return ceph_init_file(inode, file, fmode); 276 } 277 278 /* 279 * No need to block if we have caps on the auth MDS (for 280 * write) or any MDS (for read). Update wanted set 281 * asynchronously. 282 */ 283 spin_lock(&ci->i_ceph_lock); 284 if (__ceph_is_any_real_caps(ci) && 285 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { 286 int mds_wanted = __ceph_caps_mds_wanted(ci); 287 int issued = __ceph_caps_issued(ci, NULL); 288 289 dout("open %p fmode %d want %s issued %s using existing\n", 290 inode, fmode, ceph_cap_string(wanted), 291 ceph_cap_string(issued)); 292 __ceph_get_fmode(ci, fmode); 293 spin_unlock(&ci->i_ceph_lock); 294 295 /* adjust wanted? */ 296 if ((issued & wanted) != wanted && 297 (mds_wanted & wanted) != wanted && 298 ceph_snap(inode) != CEPH_SNAPDIR) 299 ceph_check_caps(ci, 0, NULL); 300 301 return ceph_init_file(inode, file, fmode); 302 } else if (ceph_snap(inode) != CEPH_NOSNAP && 303 (ci->i_snap_caps & wanted) == wanted) { 304 __ceph_get_fmode(ci, fmode); 305 spin_unlock(&ci->i_ceph_lock); 306 return ceph_init_file(inode, file, fmode); 307 } 308 309 spin_unlock(&ci->i_ceph_lock); 310 311 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 312 req = prepare_open_request(inode->i_sb, flags, 0); 313 if (IS_ERR(req)) { 314 err = PTR_ERR(req); 315 goto out; 316 } 317 req->r_inode = inode; 318 ihold(inode); 319 320 req->r_num_caps = 1; 321 err = ceph_mdsc_do_request(mdsc, NULL, req); 322 if (!err) 323 err = ceph_init_file(inode, file, req->r_fmode); 324 ceph_mdsc_put_request(req); 325 dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 326 out: 327 return err; 328 } 329 330 331 /* 332 * Do a lookup + open with a single request. If we get a non-existent 333 * file or symlink, return 1 so the VFS can retry. 334 */ 335 int ceph_atomic_open(struct inode *dir, struct dentry *dentry, 336 struct file *file, unsigned flags, umode_t mode, 337 int *opened) 338 { 339 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 340 struct ceph_mds_client *mdsc = fsc->mdsc; 341 struct ceph_mds_request *req; 342 struct dentry *dn; 343 struct ceph_acls_info acls = {}; 344 int mask; 345 int err; 346 347 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", 348 dir, dentry, dentry, 349 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); 350 351 if (dentry->d_name.len > NAME_MAX) 352 return -ENAMETOOLONG; 353 354 err = ceph_init_dentry(dentry); 355 if (err < 0) 356 return err; 357 358 if (flags & O_CREAT) { 359 err = ceph_pre_init_acls(dir, &mode, &acls); 360 if (err < 0) 361 return err; 362 } 363 364 /* do the open */ 365 req = prepare_open_request(dir->i_sb, flags, mode); 366 if (IS_ERR(req)) { 367 err = PTR_ERR(req); 368 goto out_acl; 369 } 370 req->r_dentry = dget(dentry); 371 req->r_num_caps = 2; 372 if (flags & O_CREAT) { 373 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 374 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 375 if (acls.pagelist) { 376 req->r_pagelist = acls.pagelist; 377 acls.pagelist = NULL; 378 } 379 } 380 381 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 382 if (ceph_security_xattr_wanted(dir)) 383 mask |= CEPH_CAP_XATTR_SHARED; 384 req->r_args.open.mask = cpu_to_le32(mask); 385 386 req->r_locked_dir = dir; /* caller holds dir->i_mutex */ 387 err = ceph_mdsc_do_request(mdsc, 388 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, 389 req); 390 err = ceph_handle_snapdir(req, dentry, err); 391 if (err) 392 goto out_req; 393 394 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) 395 err = ceph_handle_notrace_create(dir, dentry); 396 397 if (d_unhashed(dentry)) { 398 dn = ceph_finish_lookup(req, dentry, err); 399 if (IS_ERR(dn)) 400 err = PTR_ERR(dn); 401 } else { 402 /* we were given a hashed negative dentry */ 403 dn = NULL; 404 } 405 if (err) 406 goto out_req; 407 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { 408 /* make vfs retry on splice, ENOENT, or symlink */ 409 dout("atomic_open finish_no_open on dn %p\n", dn); 410 err = finish_no_open(file, dn); 411 } else { 412 dout("atomic_open finish_open on dn %p\n", dn); 413 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 414 ceph_init_inode_acls(d_inode(dentry), &acls); 415 *opened |= FILE_CREATED; 416 } 417 err = finish_open(file, dentry, ceph_open, opened); 418 } 419 out_req: 420 if (!req->r_err && req->r_target_inode) 421 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); 422 ceph_mdsc_put_request(req); 423 out_acl: 424 ceph_release_acls_info(&acls); 425 dout("atomic_open result=%d\n", err); 426 return err; 427 } 428 429 int ceph_release(struct inode *inode, struct file *file) 430 { 431 struct ceph_inode_info *ci = ceph_inode(inode); 432 struct ceph_file_info *cf = file->private_data; 433 434 dout("release inode %p file %p\n", inode, file); 435 ceph_put_fmode(ci, cf->fmode); 436 if (cf->last_readdir) 437 ceph_mdsc_put_request(cf->last_readdir); 438 kfree(cf->last_name); 439 kfree(cf->dir_info); 440 kmem_cache_free(ceph_file_cachep, cf); 441 442 /* wake up anyone waiting for caps on this inode */ 443 wake_up_all(&ci->i_cap_wq); 444 return 0; 445 } 446 447 enum { 448 HAVE_RETRIED = 1, 449 CHECK_EOF = 2, 450 READ_INLINE = 3, 451 }; 452 453 /* 454 * Read a range of bytes striped over one or more objects. Iterate over 455 * objects we stripe over. (That's not atomic, but good enough for now.) 456 * 457 * If we get a short result from the OSD, check against i_size; we need to 458 * only return a short read to the caller if we hit EOF. 459 */ 460 static int striped_read(struct inode *inode, 461 u64 off, u64 len, 462 struct page **pages, int num_pages, 463 int *checkeof) 464 { 465 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 466 struct ceph_inode_info *ci = ceph_inode(inode); 467 u64 pos, this_len, left; 468 loff_t i_size; 469 int page_align, pages_left; 470 int read, ret; 471 struct page **page_pos; 472 bool hit_stripe, was_short; 473 474 /* 475 * we may need to do multiple reads. not atomic, unfortunately. 476 */ 477 pos = off; 478 left = len; 479 page_pos = pages; 480 pages_left = num_pages; 481 read = 0; 482 483 more: 484 page_align = pos & ~PAGE_MASK; 485 this_len = left; 486 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), 487 &ci->i_layout, pos, &this_len, 488 ci->i_truncate_seq, 489 ci->i_truncate_size, 490 page_pos, pages_left, page_align); 491 if (ret == -ENOENT) 492 ret = 0; 493 hit_stripe = this_len < left; 494 was_short = ret >= 0 && ret < this_len; 495 dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read, 496 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); 497 498 i_size = i_size_read(inode); 499 if (ret >= 0) { 500 int didpages; 501 if (was_short && (pos + ret < i_size)) { 502 int zlen = min(this_len - ret, i_size - pos - ret); 503 int zoff = (off & ~PAGE_MASK) + read + ret; 504 dout(" zero gap %llu to %llu\n", 505 pos + ret, pos + ret + zlen); 506 ceph_zero_page_vector_range(zoff, zlen, pages); 507 ret += zlen; 508 } 509 510 didpages = (page_align + ret) >> PAGE_SHIFT; 511 pos += ret; 512 read = pos - off; 513 left -= ret; 514 page_pos += didpages; 515 pages_left -= didpages; 516 517 /* hit stripe and need continue*/ 518 if (left && hit_stripe && pos < i_size) 519 goto more; 520 } 521 522 if (read > 0) { 523 ret = read; 524 /* did we bounce off eof? */ 525 if (pos + left > i_size) 526 *checkeof = CHECK_EOF; 527 } 528 529 dout("striped_read returns %d\n", ret); 530 return ret; 531 } 532 533 /* 534 * Completely synchronous read and write methods. Direct from __user 535 * buffer to osd, or directly to user pages (if O_DIRECT). 536 * 537 * If the read spans object boundary, just do multiple reads. 538 */ 539 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i, 540 int *checkeof) 541 { 542 struct file *file = iocb->ki_filp; 543 struct inode *inode = file_inode(file); 544 struct page **pages; 545 u64 off = iocb->ki_pos; 546 int num_pages, ret; 547 size_t len = iov_iter_count(i); 548 549 dout("sync_read on file %p %llu~%u %s\n", file, off, 550 (unsigned)len, 551 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 552 553 if (!len) 554 return 0; 555 /* 556 * flush any page cache pages in this range. this 557 * will make concurrent normal and sync io slow, 558 * but it will at least behave sensibly when they are 559 * in sequence. 560 */ 561 ret = filemap_write_and_wait_range(inode->i_mapping, off, 562 off + len); 563 if (ret < 0) 564 return ret; 565 566 num_pages = calc_pages_for(off, len); 567 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 568 if (IS_ERR(pages)) 569 return PTR_ERR(pages); 570 ret = striped_read(inode, off, len, pages, 571 num_pages, checkeof); 572 if (ret > 0) { 573 int l, k = 0; 574 size_t left = ret; 575 576 while (left) { 577 size_t page_off = off & ~PAGE_MASK; 578 size_t copy = min_t(size_t, left, 579 PAGE_SIZE - page_off); 580 l = copy_page_to_iter(pages[k++], page_off, copy, i); 581 off += l; 582 left -= l; 583 if (l < copy) 584 break; 585 } 586 } 587 ceph_release_page_vector(pages, num_pages); 588 589 if (off > iocb->ki_pos) { 590 ret = off - iocb->ki_pos; 591 iocb->ki_pos = off; 592 } 593 594 dout("sync_read result %d\n", ret); 595 return ret; 596 } 597 598 struct ceph_aio_request { 599 struct kiocb *iocb; 600 size_t total_len; 601 int write; 602 int error; 603 struct list_head osd_reqs; 604 unsigned num_reqs; 605 atomic_t pending_reqs; 606 struct timespec mtime; 607 struct ceph_cap_flush *prealloc_cf; 608 }; 609 610 struct ceph_aio_work { 611 struct work_struct work; 612 struct ceph_osd_request *req; 613 }; 614 615 static void ceph_aio_retry_work(struct work_struct *work); 616 617 static void ceph_aio_complete(struct inode *inode, 618 struct ceph_aio_request *aio_req) 619 { 620 struct ceph_inode_info *ci = ceph_inode(inode); 621 int ret; 622 623 if (!atomic_dec_and_test(&aio_req->pending_reqs)) 624 return; 625 626 ret = aio_req->error; 627 if (!ret) 628 ret = aio_req->total_len; 629 630 dout("ceph_aio_complete %p rc %d\n", inode, ret); 631 632 if (ret >= 0 && aio_req->write) { 633 int dirty; 634 635 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len; 636 if (endoff > i_size_read(inode)) { 637 if (ceph_inode_set_size(inode, endoff)) 638 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 639 } 640 641 spin_lock(&ci->i_ceph_lock); 642 ci->i_inline_version = CEPH_INLINE_NONE; 643 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 644 &aio_req->prealloc_cf); 645 spin_unlock(&ci->i_ceph_lock); 646 if (dirty) 647 __mark_inode_dirty(inode, dirty); 648 649 } 650 651 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : 652 CEPH_CAP_FILE_RD)); 653 654 aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); 655 656 ceph_free_cap_flush(aio_req->prealloc_cf); 657 kfree(aio_req); 658 } 659 660 static void ceph_aio_complete_req(struct ceph_osd_request *req) 661 { 662 int rc = req->r_result; 663 struct inode *inode = req->r_inode; 664 struct ceph_aio_request *aio_req = req->r_priv; 665 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 666 int num_pages = calc_pages_for((u64)osd_data->alignment, 667 osd_data->length); 668 669 dout("ceph_aio_complete_req %p rc %d bytes %llu\n", 670 inode, rc, osd_data->length); 671 672 if (rc == -EOLDSNAPC) { 673 struct ceph_aio_work *aio_work; 674 BUG_ON(!aio_req->write); 675 676 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); 677 if (aio_work) { 678 INIT_WORK(&aio_work->work, ceph_aio_retry_work); 679 aio_work->req = req; 680 queue_work(ceph_inode_to_client(inode)->wb_wq, 681 &aio_work->work); 682 return; 683 } 684 rc = -ENOMEM; 685 } else if (!aio_req->write) { 686 if (rc == -ENOENT) 687 rc = 0; 688 if (rc >= 0 && osd_data->length > rc) { 689 int zoff = osd_data->alignment + rc; 690 int zlen = osd_data->length - rc; 691 /* 692 * If read is satisfied by single OSD request, 693 * it can pass EOF. Otherwise read is within 694 * i_size. 695 */ 696 if (aio_req->num_reqs == 1) { 697 loff_t i_size = i_size_read(inode); 698 loff_t endoff = aio_req->iocb->ki_pos + rc; 699 if (endoff < i_size) 700 zlen = min_t(size_t, zlen, 701 i_size - endoff); 702 aio_req->total_len = rc + zlen; 703 } 704 705 if (zlen > 0) 706 ceph_zero_page_vector_range(zoff, zlen, 707 osd_data->pages); 708 } 709 } 710 711 ceph_put_page_vector(osd_data->pages, num_pages, false); 712 ceph_osdc_put_request(req); 713 714 if (rc < 0) 715 cmpxchg(&aio_req->error, 0, rc); 716 717 ceph_aio_complete(inode, aio_req); 718 return; 719 } 720 721 static void ceph_aio_retry_work(struct work_struct *work) 722 { 723 struct ceph_aio_work *aio_work = 724 container_of(work, struct ceph_aio_work, work); 725 struct ceph_osd_request *orig_req = aio_work->req; 726 struct ceph_aio_request *aio_req = orig_req->r_priv; 727 struct inode *inode = orig_req->r_inode; 728 struct ceph_inode_info *ci = ceph_inode(inode); 729 struct ceph_snap_context *snapc; 730 struct ceph_osd_request *req; 731 int ret; 732 733 spin_lock(&ci->i_ceph_lock); 734 if (__ceph_have_pending_cap_snap(ci)) { 735 struct ceph_cap_snap *capsnap = 736 list_last_entry(&ci->i_cap_snaps, 737 struct ceph_cap_snap, 738 ci_item); 739 snapc = ceph_get_snap_context(capsnap->context); 740 } else { 741 BUG_ON(!ci->i_head_snapc); 742 snapc = ceph_get_snap_context(ci->i_head_snapc); 743 } 744 spin_unlock(&ci->i_ceph_lock); 745 746 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, 747 false, GFP_NOFS); 748 if (!req) { 749 ret = -ENOMEM; 750 req = orig_req; 751 goto out; 752 } 753 754 req->r_flags = CEPH_OSD_FLAG_ORDERSNAP | 755 CEPH_OSD_FLAG_ONDISK | 756 CEPH_OSD_FLAG_WRITE; 757 ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc); 758 ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid); 759 760 ret = ceph_osdc_alloc_messages(req, GFP_NOFS); 761 if (ret) { 762 ceph_osdc_put_request(req); 763 req = orig_req; 764 goto out; 765 } 766 767 req->r_ops[0] = orig_req->r_ops[0]; 768 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 769 770 req->r_mtime = aio_req->mtime; 771 req->r_data_offset = req->r_ops[0].extent.offset; 772 773 ceph_osdc_put_request(orig_req); 774 775 req->r_callback = ceph_aio_complete_req; 776 req->r_inode = inode; 777 req->r_priv = aio_req; 778 779 ret = ceph_osdc_start_request(req->r_osdc, req, false); 780 out: 781 if (ret < 0) { 782 req->r_result = ret; 783 ceph_aio_complete_req(req); 784 } 785 786 ceph_put_snap_context(snapc); 787 kfree(aio_work); 788 } 789 790 /* 791 * Write commit request unsafe callback, called to tell us when a 792 * request is unsafe (that is, in flight--has been handed to the 793 * messenger to send to its target osd). It is called again when 794 * we've received a response message indicating the request is 795 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request 796 * is completed early (and unsuccessfully) due to a timeout or 797 * interrupt. 798 * 799 * This is used if we requested both an ACK and ONDISK commit reply 800 * from the OSD. 801 */ 802 static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) 803 { 804 struct ceph_inode_info *ci = ceph_inode(req->r_inode); 805 806 dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid, 807 unsafe ? "un" : ""); 808 if (unsafe) { 809 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); 810 spin_lock(&ci->i_unsafe_lock); 811 list_add_tail(&req->r_unsafe_item, 812 &ci->i_unsafe_writes); 813 spin_unlock(&ci->i_unsafe_lock); 814 815 complete_all(&req->r_completion); 816 } else { 817 spin_lock(&ci->i_unsafe_lock); 818 list_del_init(&req->r_unsafe_item); 819 spin_unlock(&ci->i_unsafe_lock); 820 ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); 821 } 822 } 823 824 825 static ssize_t 826 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, 827 struct ceph_snap_context *snapc, 828 struct ceph_cap_flush **pcf) 829 { 830 struct file *file = iocb->ki_filp; 831 struct inode *inode = file_inode(file); 832 struct ceph_inode_info *ci = ceph_inode(inode); 833 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 834 struct ceph_vino vino; 835 struct ceph_osd_request *req; 836 struct page **pages; 837 struct ceph_aio_request *aio_req = NULL; 838 int num_pages = 0; 839 int flags; 840 int ret; 841 struct timespec mtime = current_fs_time(inode->i_sb); 842 size_t count = iov_iter_count(iter); 843 loff_t pos = iocb->ki_pos; 844 bool write = iov_iter_rw(iter) == WRITE; 845 846 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 847 return -EROFS; 848 849 dout("sync_direct_read_write (%s) on file %p %lld~%u\n", 850 (write ? "write" : "read"), file, pos, (unsigned)count); 851 852 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 853 if (ret < 0) 854 return ret; 855 856 if (write) { 857 ret = invalidate_inode_pages2_range(inode->i_mapping, 858 pos >> PAGE_SHIFT, 859 (pos + count) >> PAGE_SHIFT); 860 if (ret < 0) 861 dout("invalidate_inode_pages2_range returned %d\n", ret); 862 863 flags = CEPH_OSD_FLAG_ORDERSNAP | 864 CEPH_OSD_FLAG_ONDISK | 865 CEPH_OSD_FLAG_WRITE; 866 } else { 867 flags = CEPH_OSD_FLAG_READ; 868 } 869 870 while (iov_iter_count(iter) > 0) { 871 u64 size = dio_get_pagev_size(iter); 872 size_t start = 0; 873 ssize_t len; 874 875 vino = ceph_vino(inode); 876 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 877 vino, pos, &size, 0, 878 /*include a 'startsync' command*/ 879 write ? 2 : 1, 880 write ? CEPH_OSD_OP_WRITE : 881 CEPH_OSD_OP_READ, 882 flags, snapc, 883 ci->i_truncate_seq, 884 ci->i_truncate_size, 885 false); 886 if (IS_ERR(req)) { 887 ret = PTR_ERR(req); 888 break; 889 } 890 891 len = size; 892 pages = dio_get_pages_alloc(iter, len, &start, &num_pages); 893 if (IS_ERR(pages)) { 894 ceph_osdc_put_request(req); 895 ret = PTR_ERR(pages); 896 break; 897 } 898 899 /* 900 * To simplify error handling, allow AIO when IO within i_size 901 * or IO can be satisfied by single OSD request. 902 */ 903 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && 904 (len == count || pos + count <= i_size_read(inode))) { 905 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); 906 if (aio_req) { 907 aio_req->iocb = iocb; 908 aio_req->write = write; 909 INIT_LIST_HEAD(&aio_req->osd_reqs); 910 if (write) { 911 aio_req->mtime = mtime; 912 swap(aio_req->prealloc_cf, *pcf); 913 } 914 } 915 /* ignore error */ 916 } 917 918 if (write) { 919 /* 920 * throw out any page cache pages in this range. this 921 * may block. 922 */ 923 truncate_inode_pages_range(inode->i_mapping, pos, 924 (pos+len) | (PAGE_SIZE - 1)); 925 926 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 927 req->r_mtime = mtime; 928 } 929 930 osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, 931 false, false); 932 933 if (aio_req) { 934 aio_req->total_len += len; 935 aio_req->num_reqs++; 936 atomic_inc(&aio_req->pending_reqs); 937 938 req->r_callback = ceph_aio_complete_req; 939 req->r_inode = inode; 940 req->r_priv = aio_req; 941 list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); 942 943 pos += len; 944 iov_iter_advance(iter, len); 945 continue; 946 } 947 948 ret = ceph_osdc_start_request(req->r_osdc, req, false); 949 if (!ret) 950 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 951 952 size = i_size_read(inode); 953 if (!write) { 954 if (ret == -ENOENT) 955 ret = 0; 956 if (ret >= 0 && ret < len && pos + ret < size) { 957 int zlen = min_t(size_t, len - ret, 958 size - pos - ret); 959 ceph_zero_page_vector_range(start + ret, zlen, 960 pages); 961 ret += zlen; 962 } 963 if (ret >= 0) 964 len = ret; 965 } 966 967 ceph_put_page_vector(pages, num_pages, false); 968 969 ceph_osdc_put_request(req); 970 if (ret < 0) 971 break; 972 973 pos += len; 974 iov_iter_advance(iter, len); 975 976 if (!write && pos >= size) 977 break; 978 979 if (write && pos > size) { 980 if (ceph_inode_set_size(inode, pos)) 981 ceph_check_caps(ceph_inode(inode), 982 CHECK_CAPS_AUTHONLY, 983 NULL); 984 } 985 } 986 987 if (aio_req) { 988 if (aio_req->num_reqs == 0) { 989 kfree(aio_req); 990 return ret; 991 } 992 993 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : 994 CEPH_CAP_FILE_RD); 995 996 while (!list_empty(&aio_req->osd_reqs)) { 997 req = list_first_entry(&aio_req->osd_reqs, 998 struct ceph_osd_request, 999 r_unsafe_item); 1000 list_del_init(&req->r_unsafe_item); 1001 if (ret >= 0) 1002 ret = ceph_osdc_start_request(req->r_osdc, 1003 req, false); 1004 if (ret < 0) { 1005 req->r_result = ret; 1006 ceph_aio_complete_req(req); 1007 } 1008 } 1009 return -EIOCBQUEUED; 1010 } 1011 1012 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { 1013 ret = pos - iocb->ki_pos; 1014 iocb->ki_pos = pos; 1015 } 1016 return ret; 1017 } 1018 1019 /* 1020 * Synchronous write, straight from __user pointer or user pages. 1021 * 1022 * If write spans object boundary, just do multiple writes. (For a 1023 * correct atomic write, we should e.g. take write locks on all 1024 * objects, rollback on failure, etc.) 1025 */ 1026 static ssize_t 1027 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, 1028 struct ceph_snap_context *snapc) 1029 { 1030 struct file *file = iocb->ki_filp; 1031 struct inode *inode = file_inode(file); 1032 struct ceph_inode_info *ci = ceph_inode(inode); 1033 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1034 struct ceph_vino vino; 1035 struct ceph_osd_request *req; 1036 struct page **pages; 1037 u64 len; 1038 int num_pages; 1039 int written = 0; 1040 int flags; 1041 int check_caps = 0; 1042 int ret; 1043 struct timespec mtime = current_fs_time(inode->i_sb); 1044 size_t count = iov_iter_count(from); 1045 1046 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1047 return -EROFS; 1048 1049 dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count); 1050 1051 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 1052 if (ret < 0) 1053 return ret; 1054 1055 ret = invalidate_inode_pages2_range(inode->i_mapping, 1056 pos >> PAGE_SHIFT, 1057 (pos + count) >> PAGE_SHIFT); 1058 if (ret < 0) 1059 dout("invalidate_inode_pages2_range returned %d\n", ret); 1060 1061 flags = CEPH_OSD_FLAG_ORDERSNAP | 1062 CEPH_OSD_FLAG_ONDISK | 1063 CEPH_OSD_FLAG_WRITE | 1064 CEPH_OSD_FLAG_ACK; 1065 1066 while ((len = iov_iter_count(from)) > 0) { 1067 size_t left; 1068 int n; 1069 1070 vino = ceph_vino(inode); 1071 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1072 vino, pos, &len, 0, 1, 1073 CEPH_OSD_OP_WRITE, flags, snapc, 1074 ci->i_truncate_seq, 1075 ci->i_truncate_size, 1076 false); 1077 if (IS_ERR(req)) { 1078 ret = PTR_ERR(req); 1079 break; 1080 } 1081 1082 /* 1083 * write from beginning of first page, 1084 * regardless of io alignment 1085 */ 1086 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1087 1088 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1089 if (IS_ERR(pages)) { 1090 ret = PTR_ERR(pages); 1091 goto out; 1092 } 1093 1094 left = len; 1095 for (n = 0; n < num_pages; n++) { 1096 size_t plen = min_t(size_t, left, PAGE_SIZE); 1097 ret = copy_page_from_iter(pages[n], 0, plen, from); 1098 if (ret != plen) { 1099 ret = -EFAULT; 1100 break; 1101 } 1102 left -= ret; 1103 } 1104 1105 if (ret < 0) { 1106 ceph_release_page_vector(pages, num_pages); 1107 goto out; 1108 } 1109 1110 /* get a second commit callback */ 1111 req->r_unsafe_callback = ceph_sync_write_unsafe; 1112 req->r_inode = inode; 1113 1114 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, 1115 false, true); 1116 1117 req->r_mtime = mtime; 1118 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1119 if (!ret) 1120 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1121 1122 out: 1123 ceph_osdc_put_request(req); 1124 if (ret == 0) { 1125 pos += len; 1126 written += len; 1127 1128 if (pos > i_size_read(inode)) { 1129 check_caps = ceph_inode_set_size(inode, pos); 1130 if (check_caps) 1131 ceph_check_caps(ceph_inode(inode), 1132 CHECK_CAPS_AUTHONLY, 1133 NULL); 1134 } 1135 } else 1136 break; 1137 } 1138 1139 if (ret != -EOLDSNAPC && written > 0) { 1140 ret = written; 1141 iocb->ki_pos = pos; 1142 } 1143 return ret; 1144 } 1145 1146 /* 1147 * Wrap generic_file_aio_read with checks for cap bits on the inode. 1148 * Atomically grab references, so that those bits are not released 1149 * back to the MDS mid-read. 1150 * 1151 * Hmm, the sync read case isn't actually async... should it be? 1152 */ 1153 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) 1154 { 1155 struct file *filp = iocb->ki_filp; 1156 struct ceph_file_info *fi = filp->private_data; 1157 size_t len = iov_iter_count(to); 1158 struct inode *inode = file_inode(filp); 1159 struct ceph_inode_info *ci = ceph_inode(inode); 1160 struct page *pinned_page = NULL; 1161 ssize_t ret; 1162 int want, got = 0; 1163 int retry_op = 0, read = 0; 1164 1165 again: 1166 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 1167 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); 1168 1169 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1170 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1171 else 1172 want = CEPH_CAP_FILE_CACHE; 1173 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); 1174 if (ret < 0) 1175 return ret; 1176 1177 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || 1178 (iocb->ki_flags & IOCB_DIRECT) || 1179 (fi->flags & CEPH_F_SYNC)) { 1180 1181 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1182 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1183 ceph_cap_string(got)); 1184 1185 if (ci->i_inline_version == CEPH_INLINE_NONE) { 1186 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { 1187 ret = ceph_direct_read_write(iocb, to, 1188 NULL, NULL); 1189 if (ret >= 0 && ret < len) 1190 retry_op = CHECK_EOF; 1191 } else { 1192 ret = ceph_sync_read(iocb, to, &retry_op); 1193 } 1194 } else { 1195 retry_op = READ_INLINE; 1196 } 1197 } else { 1198 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1199 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1200 ceph_cap_string(got)); 1201 1202 ret = generic_file_read_iter(iocb, to); 1203 } 1204 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1205 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1206 if (pinned_page) { 1207 put_page(pinned_page); 1208 pinned_page = NULL; 1209 } 1210 ceph_put_cap_refs(ci, got); 1211 if (retry_op > HAVE_RETRIED && ret >= 0) { 1212 int statret; 1213 struct page *page = NULL; 1214 loff_t i_size; 1215 if (retry_op == READ_INLINE) { 1216 page = __page_cache_alloc(GFP_KERNEL); 1217 if (!page) 1218 return -ENOMEM; 1219 } 1220 1221 statret = __ceph_do_getattr(inode, page, 1222 CEPH_STAT_CAP_INLINE_DATA, !!page); 1223 if (statret < 0) { 1224 __free_page(page); 1225 if (statret == -ENODATA) { 1226 BUG_ON(retry_op != READ_INLINE); 1227 goto again; 1228 } 1229 return statret; 1230 } 1231 1232 i_size = i_size_read(inode); 1233 if (retry_op == READ_INLINE) { 1234 BUG_ON(ret > 0 || read > 0); 1235 if (iocb->ki_pos < i_size && 1236 iocb->ki_pos < PAGE_SIZE) { 1237 loff_t end = min_t(loff_t, i_size, 1238 iocb->ki_pos + len); 1239 end = min_t(loff_t, end, PAGE_SIZE); 1240 if (statret < end) 1241 zero_user_segment(page, statret, end); 1242 ret = copy_page_to_iter(page, 1243 iocb->ki_pos & ~PAGE_MASK, 1244 end - iocb->ki_pos, to); 1245 iocb->ki_pos += ret; 1246 read += ret; 1247 } 1248 if (iocb->ki_pos < i_size && read < len) { 1249 size_t zlen = min_t(size_t, len - read, 1250 i_size - iocb->ki_pos); 1251 ret = iov_iter_zero(zlen, to); 1252 iocb->ki_pos += ret; 1253 read += ret; 1254 } 1255 __free_pages(page, 0); 1256 return read; 1257 } 1258 1259 /* hit EOF or hole? */ 1260 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && 1261 ret < len) { 1262 dout("sync_read hit hole, ppos %lld < size %lld" 1263 ", reading more\n", iocb->ki_pos, i_size); 1264 1265 read += ret; 1266 len -= ret; 1267 retry_op = HAVE_RETRIED; 1268 goto again; 1269 } 1270 } 1271 1272 if (ret >= 0) 1273 ret += read; 1274 1275 return ret; 1276 } 1277 1278 /* 1279 * Take cap references to avoid releasing caps to MDS mid-write. 1280 * 1281 * If we are synchronous, and write with an old snap context, the OSD 1282 * may return EOLDSNAPC. In that case, retry the write.. _after_ 1283 * dropping our cap refs and allowing the pending snap to logically 1284 * complete _before_ this write occurs. 1285 * 1286 * If we are near ENOSPC, write synchronously. 1287 */ 1288 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) 1289 { 1290 struct file *file = iocb->ki_filp; 1291 struct ceph_file_info *fi = file->private_data; 1292 struct inode *inode = file_inode(file); 1293 struct ceph_inode_info *ci = ceph_inode(inode); 1294 struct ceph_osd_client *osdc = 1295 &ceph_sb_to_client(inode->i_sb)->client->osdc; 1296 struct ceph_cap_flush *prealloc_cf; 1297 ssize_t count, written = 0; 1298 int err, want, got; 1299 loff_t pos; 1300 1301 if (ceph_snap(inode) != CEPH_NOSNAP) 1302 return -EROFS; 1303 1304 prealloc_cf = ceph_alloc_cap_flush(); 1305 if (!prealloc_cf) 1306 return -ENOMEM; 1307 1308 inode_lock(inode); 1309 1310 /* We can write back this queue in page reclaim */ 1311 current->backing_dev_info = inode_to_bdi(inode); 1312 1313 if (iocb->ki_flags & IOCB_APPEND) { 1314 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1315 if (err < 0) 1316 goto out; 1317 } 1318 1319 err = generic_write_checks(iocb, from); 1320 if (err <= 0) 1321 goto out; 1322 1323 pos = iocb->ki_pos; 1324 count = iov_iter_count(from); 1325 err = file_remove_privs(file); 1326 if (err) 1327 goto out; 1328 1329 err = file_update_time(file); 1330 if (err) 1331 goto out; 1332 1333 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1334 err = ceph_uninline_data(file, NULL); 1335 if (err < 0) 1336 goto out; 1337 } 1338 1339 retry_snap: 1340 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) { 1341 err = -ENOSPC; 1342 goto out; 1343 } 1344 1345 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 1346 inode, ceph_vinop(inode), pos, count, i_size_read(inode)); 1347 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1348 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1349 else 1350 want = CEPH_CAP_FILE_BUFFER; 1351 got = 0; 1352 err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count, 1353 &got, NULL); 1354 if (err < 0) 1355 goto out; 1356 1357 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 1358 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 1359 1360 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 1361 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { 1362 struct ceph_snap_context *snapc; 1363 struct iov_iter data; 1364 inode_unlock(inode); 1365 1366 spin_lock(&ci->i_ceph_lock); 1367 if (__ceph_have_pending_cap_snap(ci)) { 1368 struct ceph_cap_snap *capsnap = 1369 list_last_entry(&ci->i_cap_snaps, 1370 struct ceph_cap_snap, 1371 ci_item); 1372 snapc = ceph_get_snap_context(capsnap->context); 1373 } else { 1374 BUG_ON(!ci->i_head_snapc); 1375 snapc = ceph_get_snap_context(ci->i_head_snapc); 1376 } 1377 spin_unlock(&ci->i_ceph_lock); 1378 1379 /* we might need to revert back to that point */ 1380 data = *from; 1381 if (iocb->ki_flags & IOCB_DIRECT) 1382 written = ceph_direct_read_write(iocb, &data, snapc, 1383 &prealloc_cf); 1384 else 1385 written = ceph_sync_write(iocb, &data, pos, snapc); 1386 if (written == -EOLDSNAPC) { 1387 dout("aio_write %p %llx.%llx %llu~%u" 1388 "got EOLDSNAPC, retrying\n", 1389 inode, ceph_vinop(inode), 1390 pos, (unsigned)count); 1391 inode_lock(inode); 1392 goto retry_snap; 1393 } 1394 if (written > 0) 1395 iov_iter_advance(from, written); 1396 ceph_put_snap_context(snapc); 1397 } else { 1398 /* 1399 * No need to acquire the i_truncate_mutex. Because 1400 * the MDS revokes Fwb caps before sending truncate 1401 * message to us. We can't get Fwb cap while there 1402 * are pending vmtruncate. So write and vmtruncate 1403 * can not run at the same time 1404 */ 1405 written = generic_perform_write(file, from, pos); 1406 if (likely(written >= 0)) 1407 iocb->ki_pos = pos + written; 1408 inode_unlock(inode); 1409 } 1410 1411 if (written >= 0) { 1412 int dirty; 1413 spin_lock(&ci->i_ceph_lock); 1414 ci->i_inline_version = CEPH_INLINE_NONE; 1415 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1416 &prealloc_cf); 1417 spin_unlock(&ci->i_ceph_lock); 1418 if (dirty) 1419 __mark_inode_dirty(inode, dirty); 1420 } 1421 1422 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 1423 inode, ceph_vinop(inode), pos, (unsigned)count, 1424 ceph_cap_string(got)); 1425 ceph_put_cap_refs(ci, got); 1426 1427 if (written >= 0) { 1428 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL)) 1429 iocb->ki_flags |= IOCB_DSYNC; 1430 1431 written = generic_write_sync(iocb, written); 1432 } 1433 1434 goto out_unlocked; 1435 1436 out: 1437 inode_unlock(inode); 1438 out_unlocked: 1439 ceph_free_cap_flush(prealloc_cf); 1440 current->backing_dev_info = NULL; 1441 return written ? written : err; 1442 } 1443 1444 /* 1445 * llseek. be sure to verify file size on SEEK_END. 1446 */ 1447 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) 1448 { 1449 struct inode *inode = file->f_mapping->host; 1450 loff_t i_size; 1451 int ret; 1452 1453 inode_lock(inode); 1454 1455 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { 1456 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1457 if (ret < 0) { 1458 offset = ret; 1459 goto out; 1460 } 1461 } 1462 1463 i_size = i_size_read(inode); 1464 switch (whence) { 1465 case SEEK_END: 1466 offset += i_size; 1467 break; 1468 case SEEK_CUR: 1469 /* 1470 * Here we special-case the lseek(fd, 0, SEEK_CUR) 1471 * position-querying operation. Avoid rewriting the "same" 1472 * f_pos value back to the file because a concurrent read(), 1473 * write() or lseek() might have altered it 1474 */ 1475 if (offset == 0) { 1476 offset = file->f_pos; 1477 goto out; 1478 } 1479 offset += file->f_pos; 1480 break; 1481 case SEEK_DATA: 1482 if (offset >= i_size) { 1483 ret = -ENXIO; 1484 goto out; 1485 } 1486 break; 1487 case SEEK_HOLE: 1488 if (offset >= i_size) { 1489 ret = -ENXIO; 1490 goto out; 1491 } 1492 offset = i_size; 1493 break; 1494 } 1495 1496 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 1497 1498 out: 1499 inode_unlock(inode); 1500 return offset; 1501 } 1502 1503 static inline void ceph_zero_partial_page( 1504 struct inode *inode, loff_t offset, unsigned size) 1505 { 1506 struct page *page; 1507 pgoff_t index = offset >> PAGE_SHIFT; 1508 1509 page = find_lock_page(inode->i_mapping, index); 1510 if (page) { 1511 wait_on_page_writeback(page); 1512 zero_user(page, offset & (PAGE_SIZE - 1), size); 1513 unlock_page(page); 1514 put_page(page); 1515 } 1516 } 1517 1518 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 1519 loff_t length) 1520 { 1521 loff_t nearly = round_up(offset, PAGE_SIZE); 1522 if (offset < nearly) { 1523 loff_t size = nearly - offset; 1524 if (length < size) 1525 size = length; 1526 ceph_zero_partial_page(inode, offset, size); 1527 offset += size; 1528 length -= size; 1529 } 1530 if (length >= PAGE_SIZE) { 1531 loff_t size = round_down(length, PAGE_SIZE); 1532 truncate_pagecache_range(inode, offset, offset + size - 1); 1533 offset += size; 1534 length -= size; 1535 } 1536 if (length) 1537 ceph_zero_partial_page(inode, offset, length); 1538 } 1539 1540 static int ceph_zero_partial_object(struct inode *inode, 1541 loff_t offset, loff_t *length) 1542 { 1543 struct ceph_inode_info *ci = ceph_inode(inode); 1544 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1545 struct ceph_osd_request *req; 1546 int ret = 0; 1547 loff_t zero = 0; 1548 int op; 1549 1550 if (!length) { 1551 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; 1552 length = &zero; 1553 } else { 1554 op = CEPH_OSD_OP_ZERO; 1555 } 1556 1557 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1558 ceph_vino(inode), 1559 offset, length, 1560 0, 1, op, 1561 CEPH_OSD_FLAG_WRITE | 1562 CEPH_OSD_FLAG_ONDISK, 1563 NULL, 0, 0, false); 1564 if (IS_ERR(req)) { 1565 ret = PTR_ERR(req); 1566 goto out; 1567 } 1568 1569 req->r_mtime = inode->i_mtime; 1570 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1571 if (!ret) { 1572 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1573 if (ret == -ENOENT) 1574 ret = 0; 1575 } 1576 ceph_osdc_put_request(req); 1577 1578 out: 1579 return ret; 1580 } 1581 1582 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) 1583 { 1584 int ret = 0; 1585 struct ceph_inode_info *ci = ceph_inode(inode); 1586 s32 stripe_unit = ceph_file_layout_su(ci->i_layout); 1587 s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout); 1588 s32 object_size = ceph_file_layout_object_size(ci->i_layout); 1589 u64 object_set_size = object_size * stripe_count; 1590 u64 nearly, t; 1591 1592 /* round offset up to next period boundary */ 1593 nearly = offset + object_set_size - 1; 1594 t = nearly; 1595 nearly -= do_div(t, object_set_size); 1596 1597 while (length && offset < nearly) { 1598 loff_t size = length; 1599 ret = ceph_zero_partial_object(inode, offset, &size); 1600 if (ret < 0) 1601 return ret; 1602 offset += size; 1603 length -= size; 1604 } 1605 while (length >= object_set_size) { 1606 int i; 1607 loff_t pos = offset; 1608 for (i = 0; i < stripe_count; ++i) { 1609 ret = ceph_zero_partial_object(inode, pos, NULL); 1610 if (ret < 0) 1611 return ret; 1612 pos += stripe_unit; 1613 } 1614 offset += object_set_size; 1615 length -= object_set_size; 1616 } 1617 while (length) { 1618 loff_t size = length; 1619 ret = ceph_zero_partial_object(inode, offset, &size); 1620 if (ret < 0) 1621 return ret; 1622 offset += size; 1623 length -= size; 1624 } 1625 return ret; 1626 } 1627 1628 static long ceph_fallocate(struct file *file, int mode, 1629 loff_t offset, loff_t length) 1630 { 1631 struct ceph_file_info *fi = file->private_data; 1632 struct inode *inode = file_inode(file); 1633 struct ceph_inode_info *ci = ceph_inode(inode); 1634 struct ceph_osd_client *osdc = 1635 &ceph_inode_to_client(inode)->client->osdc; 1636 struct ceph_cap_flush *prealloc_cf; 1637 int want, got = 0; 1638 int dirty; 1639 int ret = 0; 1640 loff_t endoff = 0; 1641 loff_t size; 1642 1643 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 1644 return -EOPNOTSUPP; 1645 1646 if (!S_ISREG(inode->i_mode)) 1647 return -EOPNOTSUPP; 1648 1649 prealloc_cf = ceph_alloc_cap_flush(); 1650 if (!prealloc_cf) 1651 return -ENOMEM; 1652 1653 inode_lock(inode); 1654 1655 if (ceph_snap(inode) != CEPH_NOSNAP) { 1656 ret = -EROFS; 1657 goto unlock; 1658 } 1659 1660 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && 1661 !(mode & FALLOC_FL_PUNCH_HOLE)) { 1662 ret = -ENOSPC; 1663 goto unlock; 1664 } 1665 1666 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1667 ret = ceph_uninline_data(file, NULL); 1668 if (ret < 0) 1669 goto unlock; 1670 } 1671 1672 size = i_size_read(inode); 1673 if (!(mode & FALLOC_FL_KEEP_SIZE)) 1674 endoff = offset + length; 1675 1676 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1677 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1678 else 1679 want = CEPH_CAP_FILE_BUFFER; 1680 1681 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL); 1682 if (ret < 0) 1683 goto unlock; 1684 1685 if (mode & FALLOC_FL_PUNCH_HOLE) { 1686 if (offset < size) 1687 ceph_zero_pagecache_range(inode, offset, length); 1688 ret = ceph_zero_objects(inode, offset, length); 1689 } else if (endoff > size) { 1690 truncate_pagecache_range(inode, size, -1); 1691 if (ceph_inode_set_size(inode, endoff)) 1692 ceph_check_caps(ceph_inode(inode), 1693 CHECK_CAPS_AUTHONLY, NULL); 1694 } 1695 1696 if (!ret) { 1697 spin_lock(&ci->i_ceph_lock); 1698 ci->i_inline_version = CEPH_INLINE_NONE; 1699 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1700 &prealloc_cf); 1701 spin_unlock(&ci->i_ceph_lock); 1702 if (dirty) 1703 __mark_inode_dirty(inode, dirty); 1704 } 1705 1706 ceph_put_cap_refs(ci, got); 1707 unlock: 1708 inode_unlock(inode); 1709 ceph_free_cap_flush(prealloc_cf); 1710 return ret; 1711 } 1712 1713 const struct file_operations ceph_file_fops = { 1714 .open = ceph_open, 1715 .release = ceph_release, 1716 .llseek = ceph_llseek, 1717 .read_iter = ceph_read_iter, 1718 .write_iter = ceph_write_iter, 1719 .mmap = ceph_mmap, 1720 .fsync = ceph_fsync, 1721 .lock = ceph_lock, 1722 .flock = ceph_flock, 1723 .splice_read = generic_file_splice_read, 1724 .splice_write = iter_file_splice_write, 1725 .unlocked_ioctl = ceph_ioctl, 1726 .compat_ioctl = ceph_ioctl, 1727 .fallocate = ceph_fallocate, 1728 }; 1729 1730