1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/module.h> 4 #include <linux/sched.h> 5 #include <linux/slab.h> 6 #include <linux/file.h> 7 #include <linux/mount.h> 8 #include <linux/namei.h> 9 #include <linux/writeback.h> 10 #include <linux/falloc.h> 11 12 #include "super.h" 13 #include "mds_client.h" 14 #include "cache.h" 15 16 /* 17 * Ceph file operations 18 * 19 * Implement basic open/close functionality, and implement 20 * read/write. 21 * 22 * We implement three modes of file I/O: 23 * - buffered uses the generic_file_aio_{read,write} helpers 24 * 25 * - synchronous is used when there is multi-client read/write 26 * sharing, avoids the page cache, and synchronously waits for an 27 * ack from the OSD. 28 * 29 * - direct io takes the variant of the sync path that references 30 * user pages directly. 31 * 32 * fsync() flushes and waits on dirty pages, but just queues metadata 33 * for writeback: since the MDS can recover size and mtime there is no 34 * need to wait for MDS acknowledgement. 35 */ 36 37 /* 38 * Calculate the length sum of direct io vectors that can 39 * be combined into one page vector. 40 */ 41 static size_t dio_get_pagev_size(const struct iov_iter *it) 42 { 43 const struct iovec *iov = it->iov; 44 const struct iovec *iovend = iov + it->nr_segs; 45 size_t size; 46 47 size = iov->iov_len - it->iov_offset; 48 /* 49 * An iov can be page vectored when both the current tail 50 * and the next base are page aligned. 51 */ 52 while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && 53 (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { 54 size += iov->iov_len; 55 } 56 dout("dio_get_pagevlen len = %zu\n", size); 57 return size; 58 } 59 60 /* 61 * Allocate a page vector based on (@it, @nbytes). 62 * The return value is the tuple describing a page vector, 63 * that is (@pages, @page_align, @num_pages). 64 */ 65 static struct page ** 66 dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, 67 size_t *page_align, int *num_pages) 68 { 69 struct iov_iter tmp_it = *it; 70 size_t align; 71 struct page **pages; 72 int ret = 0, idx, npages; 73 74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) & 75 (PAGE_SIZE - 1); 76 npages = calc_pages_for(align, nbytes); 77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); 78 if (!pages) { 79 pages = vmalloc(sizeof(*pages) * npages); 80 if (!pages) 81 return ERR_PTR(-ENOMEM); 82 } 83 84 for (idx = 0; idx < npages; ) { 85 size_t start; 86 ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, 87 npages - idx, &start); 88 if (ret < 0) 89 goto fail; 90 91 iov_iter_advance(&tmp_it, ret); 92 nbytes -= ret; 93 idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; 94 } 95 96 BUG_ON(nbytes != 0); 97 *num_pages = npages; 98 *page_align = align; 99 dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); 100 return pages; 101 fail: 102 ceph_put_page_vector(pages, idx, false); 103 return ERR_PTR(ret); 104 } 105 106 /* 107 * Prepare an open request. Preallocate ceph_cap to avoid an 108 * inopportune ENOMEM later. 109 */ 110 static struct ceph_mds_request * 111 prepare_open_request(struct super_block *sb, int flags, int create_mode) 112 { 113 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 114 struct ceph_mds_client *mdsc = fsc->mdsc; 115 struct ceph_mds_request *req; 116 int want_auth = USE_ANY_MDS; 117 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 118 119 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) 120 want_auth = USE_AUTH_MDS; 121 122 req = ceph_mdsc_create_request(mdsc, op, want_auth); 123 if (IS_ERR(req)) 124 goto out; 125 req->r_fmode = ceph_flags_to_mode(flags); 126 req->r_args.open.flags = cpu_to_le32(flags); 127 req->r_args.open.mode = cpu_to_le32(create_mode); 128 out: 129 return req; 130 } 131 132 /* 133 * initialize private struct file data. 134 * if we fail, clean up by dropping fmode reference on the ceph_inode 135 */ 136 static int ceph_init_file(struct inode *inode, struct file *file, int fmode) 137 { 138 struct ceph_file_info *cf; 139 int ret = 0; 140 141 switch (inode->i_mode & S_IFMT) { 142 case S_IFREG: 143 ceph_fscache_register_inode_cookie(inode); 144 ceph_fscache_file_set_cookie(inode, file); 145 case S_IFDIR: 146 dout("init_file %p %p 0%o (regular)\n", inode, file, 147 inode->i_mode); 148 cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); 149 if (cf == NULL) { 150 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 151 return -ENOMEM; 152 } 153 cf->fmode = fmode; 154 cf->next_offset = 2; 155 cf->readdir_cache_idx = -1; 156 file->private_data = cf; 157 BUG_ON(inode->i_fop->release != ceph_release); 158 break; 159 160 case S_IFLNK: 161 dout("init_file %p %p 0%o (symlink)\n", inode, file, 162 inode->i_mode); 163 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 164 break; 165 166 default: 167 dout("init_file %p %p 0%o (special)\n", inode, file, 168 inode->i_mode); 169 /* 170 * we need to drop the open ref now, since we don't 171 * have .release set to ceph_release. 172 */ 173 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 174 BUG_ON(inode->i_fop->release == ceph_release); 175 176 /* call the proper open fop */ 177 ret = inode->i_fop->open(inode, file); 178 } 179 return ret; 180 } 181 182 /* 183 * try renew caps after session gets killed. 184 */ 185 int ceph_renew_caps(struct inode *inode) 186 { 187 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 188 struct ceph_inode_info *ci = ceph_inode(inode); 189 struct ceph_mds_request *req; 190 int err, flags, wanted; 191 192 spin_lock(&ci->i_ceph_lock); 193 wanted = __ceph_caps_file_wanted(ci); 194 if (__ceph_is_any_real_caps(ci) && 195 (!(wanted & CEPH_CAP_ANY_WR) == 0 || ci->i_auth_cap)) { 196 int issued = __ceph_caps_issued(ci, NULL); 197 spin_unlock(&ci->i_ceph_lock); 198 dout("renew caps %p want %s issued %s updating mds_wanted\n", 199 inode, ceph_cap_string(wanted), ceph_cap_string(issued)); 200 ceph_check_caps(ci, 0, NULL); 201 return 0; 202 } 203 spin_unlock(&ci->i_ceph_lock); 204 205 flags = 0; 206 if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR)) 207 flags = O_RDWR; 208 else if (wanted & CEPH_CAP_FILE_RD) 209 flags = O_RDONLY; 210 else if (wanted & CEPH_CAP_FILE_WR) 211 flags = O_WRONLY; 212 #ifdef O_LAZY 213 if (wanted & CEPH_CAP_FILE_LAZYIO) 214 flags |= O_LAZY; 215 #endif 216 217 req = prepare_open_request(inode->i_sb, flags, 0); 218 if (IS_ERR(req)) { 219 err = PTR_ERR(req); 220 goto out; 221 } 222 223 req->r_inode = inode; 224 ihold(inode); 225 req->r_num_caps = 1; 226 req->r_fmode = -1; 227 228 err = ceph_mdsc_do_request(mdsc, NULL, req); 229 ceph_mdsc_put_request(req); 230 out: 231 dout("renew caps %p open result=%d\n", inode, err); 232 return err < 0 ? err : 0; 233 } 234 235 /* 236 * If we already have the requisite capabilities, we can satisfy 237 * the open request locally (no need to request new caps from the 238 * MDS). We do, however, need to inform the MDS (asynchronously) 239 * if our wanted caps set expands. 240 */ 241 int ceph_open(struct inode *inode, struct file *file) 242 { 243 struct ceph_inode_info *ci = ceph_inode(inode); 244 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 245 struct ceph_mds_client *mdsc = fsc->mdsc; 246 struct ceph_mds_request *req; 247 struct ceph_file_info *cf = file->private_data; 248 int err; 249 int flags, fmode, wanted; 250 251 if (cf) { 252 dout("open file %p is already opened\n", file); 253 return 0; 254 } 255 256 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ 257 flags = file->f_flags & ~(O_CREAT|O_EXCL); 258 if (S_ISDIR(inode->i_mode)) 259 flags = O_DIRECTORY; /* mds likes to know */ 260 261 dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, 262 ceph_vinop(inode), file, flags, file->f_flags); 263 fmode = ceph_flags_to_mode(flags); 264 wanted = ceph_caps_for_mode(fmode); 265 266 /* snapped files are read-only */ 267 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) 268 return -EROFS; 269 270 /* trivially open snapdir */ 271 if (ceph_snap(inode) == CEPH_SNAPDIR) { 272 spin_lock(&ci->i_ceph_lock); 273 __ceph_get_fmode(ci, fmode); 274 spin_unlock(&ci->i_ceph_lock); 275 return ceph_init_file(inode, file, fmode); 276 } 277 278 /* 279 * No need to block if we have caps on the auth MDS (for 280 * write) or any MDS (for read). Update wanted set 281 * asynchronously. 282 */ 283 spin_lock(&ci->i_ceph_lock); 284 if (__ceph_is_any_real_caps(ci) && 285 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { 286 int mds_wanted = __ceph_caps_mds_wanted(ci, true); 287 int issued = __ceph_caps_issued(ci, NULL); 288 289 dout("open %p fmode %d want %s issued %s using existing\n", 290 inode, fmode, ceph_cap_string(wanted), 291 ceph_cap_string(issued)); 292 __ceph_get_fmode(ci, fmode); 293 spin_unlock(&ci->i_ceph_lock); 294 295 /* adjust wanted? */ 296 if ((issued & wanted) != wanted && 297 (mds_wanted & wanted) != wanted && 298 ceph_snap(inode) != CEPH_SNAPDIR) 299 ceph_check_caps(ci, 0, NULL); 300 301 return ceph_init_file(inode, file, fmode); 302 } else if (ceph_snap(inode) != CEPH_NOSNAP && 303 (ci->i_snap_caps & wanted) == wanted) { 304 __ceph_get_fmode(ci, fmode); 305 spin_unlock(&ci->i_ceph_lock); 306 return ceph_init_file(inode, file, fmode); 307 } 308 309 spin_unlock(&ci->i_ceph_lock); 310 311 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 312 req = prepare_open_request(inode->i_sb, flags, 0); 313 if (IS_ERR(req)) { 314 err = PTR_ERR(req); 315 goto out; 316 } 317 req->r_inode = inode; 318 ihold(inode); 319 320 req->r_num_caps = 1; 321 err = ceph_mdsc_do_request(mdsc, NULL, req); 322 if (!err) 323 err = ceph_init_file(inode, file, req->r_fmode); 324 ceph_mdsc_put_request(req); 325 dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 326 out: 327 return err; 328 } 329 330 331 /* 332 * Do a lookup + open with a single request. If we get a non-existent 333 * file or symlink, return 1 so the VFS can retry. 334 */ 335 int ceph_atomic_open(struct inode *dir, struct dentry *dentry, 336 struct file *file, unsigned flags, umode_t mode, 337 int *opened) 338 { 339 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 340 struct ceph_mds_client *mdsc = fsc->mdsc; 341 struct ceph_mds_request *req; 342 struct dentry *dn; 343 struct ceph_acls_info acls = {}; 344 int mask; 345 int err; 346 347 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", 348 dir, dentry, dentry, 349 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); 350 351 if (dentry->d_name.len > NAME_MAX) 352 return -ENAMETOOLONG; 353 354 if (flags & O_CREAT) { 355 err = ceph_pre_init_acls(dir, &mode, &acls); 356 if (err < 0) 357 return err; 358 } 359 360 /* do the open */ 361 req = prepare_open_request(dir->i_sb, flags, mode); 362 if (IS_ERR(req)) { 363 err = PTR_ERR(req); 364 goto out_acl; 365 } 366 req->r_dentry = dget(dentry); 367 req->r_num_caps = 2; 368 if (flags & O_CREAT) { 369 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 370 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 371 if (acls.pagelist) { 372 req->r_pagelist = acls.pagelist; 373 acls.pagelist = NULL; 374 } 375 } 376 377 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 378 if (ceph_security_xattr_wanted(dir)) 379 mask |= CEPH_CAP_XATTR_SHARED; 380 req->r_args.open.mask = cpu_to_le32(mask); 381 382 req->r_parent = dir; 383 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); 384 err = ceph_mdsc_do_request(mdsc, 385 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, 386 req); 387 err = ceph_handle_snapdir(req, dentry, err); 388 if (err) 389 goto out_req; 390 391 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) 392 err = ceph_handle_notrace_create(dir, dentry); 393 394 if (d_in_lookup(dentry)) { 395 dn = ceph_finish_lookup(req, dentry, err); 396 if (IS_ERR(dn)) 397 err = PTR_ERR(dn); 398 } else { 399 /* we were given a hashed negative dentry */ 400 dn = NULL; 401 } 402 if (err) 403 goto out_req; 404 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { 405 /* make vfs retry on splice, ENOENT, or symlink */ 406 dout("atomic_open finish_no_open on dn %p\n", dn); 407 err = finish_no_open(file, dn); 408 } else { 409 dout("atomic_open finish_open on dn %p\n", dn); 410 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 411 ceph_init_inode_acls(d_inode(dentry), &acls); 412 *opened |= FILE_CREATED; 413 } 414 err = finish_open(file, dentry, ceph_open, opened); 415 } 416 out_req: 417 if (!req->r_err && req->r_target_inode) 418 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); 419 ceph_mdsc_put_request(req); 420 out_acl: 421 ceph_release_acls_info(&acls); 422 dout("atomic_open result=%d\n", err); 423 return err; 424 } 425 426 int ceph_release(struct inode *inode, struct file *file) 427 { 428 struct ceph_inode_info *ci = ceph_inode(inode); 429 struct ceph_file_info *cf = file->private_data; 430 431 dout("release inode %p file %p\n", inode, file); 432 ceph_put_fmode(ci, cf->fmode); 433 if (cf->last_readdir) 434 ceph_mdsc_put_request(cf->last_readdir); 435 kfree(cf->last_name); 436 kfree(cf->dir_info); 437 kmem_cache_free(ceph_file_cachep, cf); 438 439 /* wake up anyone waiting for caps on this inode */ 440 wake_up_all(&ci->i_cap_wq); 441 return 0; 442 } 443 444 enum { 445 HAVE_RETRIED = 1, 446 CHECK_EOF = 2, 447 READ_INLINE = 3, 448 }; 449 450 /* 451 * Read a range of bytes striped over one or more objects. Iterate over 452 * objects we stripe over. (That's not atomic, but good enough for now.) 453 * 454 * If we get a short result from the OSD, check against i_size; we need to 455 * only return a short read to the caller if we hit EOF. 456 */ 457 static int striped_read(struct inode *inode, 458 u64 pos, u64 len, 459 struct page **pages, int num_pages, 460 int page_align, int *checkeof) 461 { 462 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 463 struct ceph_inode_info *ci = ceph_inode(inode); 464 u64 this_len; 465 loff_t i_size; 466 int page_idx; 467 int ret, read = 0; 468 bool hit_stripe, was_short; 469 470 /* 471 * we may need to do multiple reads. not atomic, unfortunately. 472 */ 473 more: 474 this_len = len; 475 page_idx = (page_align + read) >> PAGE_SHIFT; 476 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), 477 &ci->i_layout, pos, &this_len, 478 ci->i_truncate_seq, ci->i_truncate_size, 479 pages + page_idx, num_pages - page_idx, 480 ((page_align + read) & ~PAGE_MASK)); 481 if (ret == -ENOENT) 482 ret = 0; 483 hit_stripe = this_len < len; 484 was_short = ret >= 0 && ret < this_len; 485 dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read, 486 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); 487 488 i_size = i_size_read(inode); 489 if (ret >= 0) { 490 if (was_short && (pos + ret < i_size)) { 491 int zlen = min(this_len - ret, i_size - pos - ret); 492 int zoff = page_align + read + ret; 493 dout(" zero gap %llu to %llu\n", 494 pos + ret, pos + ret + zlen); 495 ceph_zero_page_vector_range(zoff, zlen, pages); 496 ret += zlen; 497 } 498 499 read += ret; 500 pos += ret; 501 len -= ret; 502 503 /* hit stripe and need continue*/ 504 if (len && hit_stripe && pos < i_size) 505 goto more; 506 } 507 508 if (read > 0) { 509 ret = read; 510 /* did we bounce off eof? */ 511 if (pos + len > i_size) 512 *checkeof = CHECK_EOF; 513 } 514 515 dout("striped_read returns %d\n", ret); 516 return ret; 517 } 518 519 /* 520 * Completely synchronous read and write methods. Direct from __user 521 * buffer to osd, or directly to user pages (if O_DIRECT). 522 * 523 * If the read spans object boundary, just do multiple reads. 524 */ 525 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to, 526 int *checkeof) 527 { 528 struct file *file = iocb->ki_filp; 529 struct inode *inode = file_inode(file); 530 struct page **pages; 531 u64 off = iocb->ki_pos; 532 int num_pages; 533 ssize_t ret; 534 size_t len = iov_iter_count(to); 535 536 dout("sync_read on file %p %llu~%u %s\n", file, off, 537 (unsigned)len, 538 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 539 540 if (!len) 541 return 0; 542 /* 543 * flush any page cache pages in this range. this 544 * will make concurrent normal and sync io slow, 545 * but it will at least behave sensibly when they are 546 * in sequence. 547 */ 548 ret = filemap_write_and_wait_range(inode->i_mapping, off, 549 off + len); 550 if (ret < 0) 551 return ret; 552 553 if (unlikely(to->type & ITER_PIPE)) { 554 size_t page_off; 555 ret = iov_iter_get_pages_alloc(to, &pages, len, 556 &page_off); 557 if (ret <= 0) 558 return -ENOMEM; 559 num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE); 560 561 ret = striped_read(inode, off, ret, pages, num_pages, 562 page_off, checkeof); 563 if (ret > 0) { 564 iov_iter_advance(to, ret); 565 off += ret; 566 } else { 567 iov_iter_advance(to, 0); 568 } 569 ceph_put_page_vector(pages, num_pages, false); 570 } else { 571 num_pages = calc_pages_for(off, len); 572 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 573 if (IS_ERR(pages)) 574 return PTR_ERR(pages); 575 576 ret = striped_read(inode, off, len, pages, num_pages, 577 (off & ~PAGE_MASK), checkeof); 578 if (ret > 0) { 579 int l, k = 0; 580 size_t left = ret; 581 582 while (left) { 583 size_t page_off = off & ~PAGE_MASK; 584 size_t copy = min_t(size_t, left, 585 PAGE_SIZE - page_off); 586 l = copy_page_to_iter(pages[k++], page_off, 587 copy, to); 588 off += l; 589 left -= l; 590 if (l < copy) 591 break; 592 } 593 } 594 ceph_release_page_vector(pages, num_pages); 595 } 596 597 if (off > iocb->ki_pos) { 598 ret = off - iocb->ki_pos; 599 iocb->ki_pos = off; 600 } 601 602 dout("sync_read result %zd\n", ret); 603 return ret; 604 } 605 606 struct ceph_aio_request { 607 struct kiocb *iocb; 608 size_t total_len; 609 int write; 610 int error; 611 struct list_head osd_reqs; 612 unsigned num_reqs; 613 atomic_t pending_reqs; 614 struct timespec mtime; 615 struct ceph_cap_flush *prealloc_cf; 616 }; 617 618 struct ceph_aio_work { 619 struct work_struct work; 620 struct ceph_osd_request *req; 621 }; 622 623 static void ceph_aio_retry_work(struct work_struct *work); 624 625 static void ceph_aio_complete(struct inode *inode, 626 struct ceph_aio_request *aio_req) 627 { 628 struct ceph_inode_info *ci = ceph_inode(inode); 629 int ret; 630 631 if (!atomic_dec_and_test(&aio_req->pending_reqs)) 632 return; 633 634 ret = aio_req->error; 635 if (!ret) 636 ret = aio_req->total_len; 637 638 dout("ceph_aio_complete %p rc %d\n", inode, ret); 639 640 if (ret >= 0 && aio_req->write) { 641 int dirty; 642 643 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len; 644 if (endoff > i_size_read(inode)) { 645 if (ceph_inode_set_size(inode, endoff)) 646 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 647 } 648 649 spin_lock(&ci->i_ceph_lock); 650 ci->i_inline_version = CEPH_INLINE_NONE; 651 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 652 &aio_req->prealloc_cf); 653 spin_unlock(&ci->i_ceph_lock); 654 if (dirty) 655 __mark_inode_dirty(inode, dirty); 656 657 } 658 659 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : 660 CEPH_CAP_FILE_RD)); 661 662 aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); 663 664 ceph_free_cap_flush(aio_req->prealloc_cf); 665 kfree(aio_req); 666 } 667 668 static void ceph_aio_complete_req(struct ceph_osd_request *req) 669 { 670 int rc = req->r_result; 671 struct inode *inode = req->r_inode; 672 struct ceph_aio_request *aio_req = req->r_priv; 673 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 674 int num_pages = calc_pages_for((u64)osd_data->alignment, 675 osd_data->length); 676 677 dout("ceph_aio_complete_req %p rc %d bytes %llu\n", 678 inode, rc, osd_data->length); 679 680 if (rc == -EOLDSNAPC) { 681 struct ceph_aio_work *aio_work; 682 BUG_ON(!aio_req->write); 683 684 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); 685 if (aio_work) { 686 INIT_WORK(&aio_work->work, ceph_aio_retry_work); 687 aio_work->req = req; 688 queue_work(ceph_inode_to_client(inode)->wb_wq, 689 &aio_work->work); 690 return; 691 } 692 rc = -ENOMEM; 693 } else if (!aio_req->write) { 694 if (rc == -ENOENT) 695 rc = 0; 696 if (rc >= 0 && osd_data->length > rc) { 697 int zoff = osd_data->alignment + rc; 698 int zlen = osd_data->length - rc; 699 /* 700 * If read is satisfied by single OSD request, 701 * it can pass EOF. Otherwise read is within 702 * i_size. 703 */ 704 if (aio_req->num_reqs == 1) { 705 loff_t i_size = i_size_read(inode); 706 loff_t endoff = aio_req->iocb->ki_pos + rc; 707 if (endoff < i_size) 708 zlen = min_t(size_t, zlen, 709 i_size - endoff); 710 aio_req->total_len = rc + zlen; 711 } 712 713 if (zlen > 0) 714 ceph_zero_page_vector_range(zoff, zlen, 715 osd_data->pages); 716 } 717 } 718 719 ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write); 720 ceph_osdc_put_request(req); 721 722 if (rc < 0) 723 cmpxchg(&aio_req->error, 0, rc); 724 725 ceph_aio_complete(inode, aio_req); 726 return; 727 } 728 729 static void ceph_aio_retry_work(struct work_struct *work) 730 { 731 struct ceph_aio_work *aio_work = 732 container_of(work, struct ceph_aio_work, work); 733 struct ceph_osd_request *orig_req = aio_work->req; 734 struct ceph_aio_request *aio_req = orig_req->r_priv; 735 struct inode *inode = orig_req->r_inode; 736 struct ceph_inode_info *ci = ceph_inode(inode); 737 struct ceph_snap_context *snapc; 738 struct ceph_osd_request *req; 739 int ret; 740 741 spin_lock(&ci->i_ceph_lock); 742 if (__ceph_have_pending_cap_snap(ci)) { 743 struct ceph_cap_snap *capsnap = 744 list_last_entry(&ci->i_cap_snaps, 745 struct ceph_cap_snap, 746 ci_item); 747 snapc = ceph_get_snap_context(capsnap->context); 748 } else { 749 BUG_ON(!ci->i_head_snapc); 750 snapc = ceph_get_snap_context(ci->i_head_snapc); 751 } 752 spin_unlock(&ci->i_ceph_lock); 753 754 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, 755 false, GFP_NOFS); 756 if (!req) { 757 ret = -ENOMEM; 758 req = orig_req; 759 goto out; 760 } 761 762 req->r_flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; 763 ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc); 764 ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid); 765 766 ret = ceph_osdc_alloc_messages(req, GFP_NOFS); 767 if (ret) { 768 ceph_osdc_put_request(req); 769 req = orig_req; 770 goto out; 771 } 772 773 req->r_ops[0] = orig_req->r_ops[0]; 774 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 775 776 req->r_mtime = aio_req->mtime; 777 req->r_data_offset = req->r_ops[0].extent.offset; 778 779 ceph_osdc_put_request(orig_req); 780 781 req->r_callback = ceph_aio_complete_req; 782 req->r_inode = inode; 783 req->r_priv = aio_req; 784 785 ret = ceph_osdc_start_request(req->r_osdc, req, false); 786 out: 787 if (ret < 0) { 788 req->r_result = ret; 789 ceph_aio_complete_req(req); 790 } 791 792 ceph_put_snap_context(snapc); 793 kfree(aio_work); 794 } 795 796 static ssize_t 797 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, 798 struct ceph_snap_context *snapc, 799 struct ceph_cap_flush **pcf) 800 { 801 struct file *file = iocb->ki_filp; 802 struct inode *inode = file_inode(file); 803 struct ceph_inode_info *ci = ceph_inode(inode); 804 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 805 struct ceph_vino vino; 806 struct ceph_osd_request *req; 807 struct page **pages; 808 struct ceph_aio_request *aio_req = NULL; 809 int num_pages = 0; 810 int flags; 811 int ret; 812 struct timespec mtime = current_time(inode); 813 size_t count = iov_iter_count(iter); 814 loff_t pos = iocb->ki_pos; 815 bool write = iov_iter_rw(iter) == WRITE; 816 817 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 818 return -EROFS; 819 820 dout("sync_direct_read_write (%s) on file %p %lld~%u\n", 821 (write ? "write" : "read"), file, pos, (unsigned)count); 822 823 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 824 if (ret < 0) 825 return ret; 826 827 if (write) { 828 int ret2 = invalidate_inode_pages2_range(inode->i_mapping, 829 pos >> PAGE_SHIFT, 830 (pos + count) >> PAGE_SHIFT); 831 if (ret2 < 0) 832 dout("invalidate_inode_pages2_range returned %d\n", ret2); 833 834 flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; 835 } else { 836 flags = CEPH_OSD_FLAG_READ; 837 } 838 839 while (iov_iter_count(iter) > 0) { 840 u64 size = dio_get_pagev_size(iter); 841 size_t start = 0; 842 ssize_t len; 843 844 vino = ceph_vino(inode); 845 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 846 vino, pos, &size, 0, 847 /*include a 'startsync' command*/ 848 write ? 2 : 1, 849 write ? CEPH_OSD_OP_WRITE : 850 CEPH_OSD_OP_READ, 851 flags, snapc, 852 ci->i_truncate_seq, 853 ci->i_truncate_size, 854 false); 855 if (IS_ERR(req)) { 856 ret = PTR_ERR(req); 857 break; 858 } 859 860 len = size; 861 pages = dio_get_pages_alloc(iter, len, &start, &num_pages); 862 if (IS_ERR(pages)) { 863 ceph_osdc_put_request(req); 864 ret = PTR_ERR(pages); 865 break; 866 } 867 868 /* 869 * To simplify error handling, allow AIO when IO within i_size 870 * or IO can be satisfied by single OSD request. 871 */ 872 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && 873 (len == count || pos + count <= i_size_read(inode))) { 874 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); 875 if (aio_req) { 876 aio_req->iocb = iocb; 877 aio_req->write = write; 878 INIT_LIST_HEAD(&aio_req->osd_reqs); 879 if (write) { 880 aio_req->mtime = mtime; 881 swap(aio_req->prealloc_cf, *pcf); 882 } 883 } 884 /* ignore error */ 885 } 886 887 if (write) { 888 /* 889 * throw out any page cache pages in this range. this 890 * may block. 891 */ 892 truncate_inode_pages_range(inode->i_mapping, pos, 893 (pos+len) | (PAGE_SIZE - 1)); 894 895 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 896 req->r_mtime = mtime; 897 } 898 899 osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, 900 false, false); 901 902 if (aio_req) { 903 aio_req->total_len += len; 904 aio_req->num_reqs++; 905 atomic_inc(&aio_req->pending_reqs); 906 907 req->r_callback = ceph_aio_complete_req; 908 req->r_inode = inode; 909 req->r_priv = aio_req; 910 list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); 911 912 pos += len; 913 iov_iter_advance(iter, len); 914 continue; 915 } 916 917 ret = ceph_osdc_start_request(req->r_osdc, req, false); 918 if (!ret) 919 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 920 921 size = i_size_read(inode); 922 if (!write) { 923 if (ret == -ENOENT) 924 ret = 0; 925 if (ret >= 0 && ret < len && pos + ret < size) { 926 int zlen = min_t(size_t, len - ret, 927 size - pos - ret); 928 ceph_zero_page_vector_range(start + ret, zlen, 929 pages); 930 ret += zlen; 931 } 932 if (ret >= 0) 933 len = ret; 934 } 935 936 ceph_put_page_vector(pages, num_pages, !write); 937 938 ceph_osdc_put_request(req); 939 if (ret < 0) 940 break; 941 942 pos += len; 943 iov_iter_advance(iter, len); 944 945 if (!write && pos >= size) 946 break; 947 948 if (write && pos > size) { 949 if (ceph_inode_set_size(inode, pos)) 950 ceph_check_caps(ceph_inode(inode), 951 CHECK_CAPS_AUTHONLY, 952 NULL); 953 } 954 } 955 956 if (aio_req) { 957 LIST_HEAD(osd_reqs); 958 959 if (aio_req->num_reqs == 0) { 960 kfree(aio_req); 961 return ret; 962 } 963 964 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : 965 CEPH_CAP_FILE_RD); 966 967 list_splice(&aio_req->osd_reqs, &osd_reqs); 968 while (!list_empty(&osd_reqs)) { 969 req = list_first_entry(&osd_reqs, 970 struct ceph_osd_request, 971 r_unsafe_item); 972 list_del_init(&req->r_unsafe_item); 973 if (ret >= 0) 974 ret = ceph_osdc_start_request(req->r_osdc, 975 req, false); 976 if (ret < 0) { 977 req->r_result = ret; 978 ceph_aio_complete_req(req); 979 } 980 } 981 return -EIOCBQUEUED; 982 } 983 984 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { 985 ret = pos - iocb->ki_pos; 986 iocb->ki_pos = pos; 987 } 988 return ret; 989 } 990 991 /* 992 * Synchronous write, straight from __user pointer or user pages. 993 * 994 * If write spans object boundary, just do multiple writes. (For a 995 * correct atomic write, we should e.g. take write locks on all 996 * objects, rollback on failure, etc.) 997 */ 998 static ssize_t 999 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, 1000 struct ceph_snap_context *snapc) 1001 { 1002 struct file *file = iocb->ki_filp; 1003 struct inode *inode = file_inode(file); 1004 struct ceph_inode_info *ci = ceph_inode(inode); 1005 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1006 struct ceph_vino vino; 1007 struct ceph_osd_request *req; 1008 struct page **pages; 1009 u64 len; 1010 int num_pages; 1011 int written = 0; 1012 int flags; 1013 int check_caps = 0; 1014 int ret; 1015 struct timespec mtime = current_time(inode); 1016 size_t count = iov_iter_count(from); 1017 1018 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1019 return -EROFS; 1020 1021 dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count); 1022 1023 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 1024 if (ret < 0) 1025 return ret; 1026 1027 ret = invalidate_inode_pages2_range(inode->i_mapping, 1028 pos >> PAGE_SHIFT, 1029 (pos + count) >> PAGE_SHIFT); 1030 if (ret < 0) 1031 dout("invalidate_inode_pages2_range returned %d\n", ret); 1032 1033 flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_WRITE; 1034 1035 while ((len = iov_iter_count(from)) > 0) { 1036 size_t left; 1037 int n; 1038 1039 vino = ceph_vino(inode); 1040 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1041 vino, pos, &len, 0, 1, 1042 CEPH_OSD_OP_WRITE, flags, snapc, 1043 ci->i_truncate_seq, 1044 ci->i_truncate_size, 1045 false); 1046 if (IS_ERR(req)) { 1047 ret = PTR_ERR(req); 1048 break; 1049 } 1050 1051 /* 1052 * write from beginning of first page, 1053 * regardless of io alignment 1054 */ 1055 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1056 1057 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1058 if (IS_ERR(pages)) { 1059 ret = PTR_ERR(pages); 1060 goto out; 1061 } 1062 1063 left = len; 1064 for (n = 0; n < num_pages; n++) { 1065 size_t plen = min_t(size_t, left, PAGE_SIZE); 1066 ret = copy_page_from_iter(pages[n], 0, plen, from); 1067 if (ret != plen) { 1068 ret = -EFAULT; 1069 break; 1070 } 1071 left -= ret; 1072 } 1073 1074 if (ret < 0) { 1075 ceph_release_page_vector(pages, num_pages); 1076 goto out; 1077 } 1078 1079 req->r_inode = inode; 1080 1081 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, 1082 false, true); 1083 1084 req->r_mtime = mtime; 1085 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1086 if (!ret) 1087 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1088 1089 out: 1090 ceph_osdc_put_request(req); 1091 if (ret == 0) { 1092 pos += len; 1093 written += len; 1094 1095 if (pos > i_size_read(inode)) { 1096 check_caps = ceph_inode_set_size(inode, pos); 1097 if (check_caps) 1098 ceph_check_caps(ceph_inode(inode), 1099 CHECK_CAPS_AUTHONLY, 1100 NULL); 1101 } 1102 } else 1103 break; 1104 } 1105 1106 if (ret != -EOLDSNAPC && written > 0) { 1107 ret = written; 1108 iocb->ki_pos = pos; 1109 } 1110 return ret; 1111 } 1112 1113 /* 1114 * Wrap generic_file_aio_read with checks for cap bits on the inode. 1115 * Atomically grab references, so that those bits are not released 1116 * back to the MDS mid-read. 1117 * 1118 * Hmm, the sync read case isn't actually async... should it be? 1119 */ 1120 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) 1121 { 1122 struct file *filp = iocb->ki_filp; 1123 struct ceph_file_info *fi = filp->private_data; 1124 size_t len = iov_iter_count(to); 1125 struct inode *inode = file_inode(filp); 1126 struct ceph_inode_info *ci = ceph_inode(inode); 1127 struct page *pinned_page = NULL; 1128 ssize_t ret; 1129 int want, got = 0; 1130 int retry_op = 0, read = 0; 1131 1132 again: 1133 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 1134 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); 1135 1136 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1137 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1138 else 1139 want = CEPH_CAP_FILE_CACHE; 1140 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); 1141 if (ret < 0) 1142 return ret; 1143 1144 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || 1145 (iocb->ki_flags & IOCB_DIRECT) || 1146 (fi->flags & CEPH_F_SYNC)) { 1147 1148 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1149 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1150 ceph_cap_string(got)); 1151 1152 if (ci->i_inline_version == CEPH_INLINE_NONE) { 1153 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { 1154 ret = ceph_direct_read_write(iocb, to, 1155 NULL, NULL); 1156 if (ret >= 0 && ret < len) 1157 retry_op = CHECK_EOF; 1158 } else { 1159 ret = ceph_sync_read(iocb, to, &retry_op); 1160 } 1161 } else { 1162 retry_op = READ_INLINE; 1163 } 1164 } else { 1165 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1166 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1167 ceph_cap_string(got)); 1168 current->journal_info = filp; 1169 ret = generic_file_read_iter(iocb, to); 1170 current->journal_info = NULL; 1171 } 1172 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1173 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1174 if (pinned_page) { 1175 put_page(pinned_page); 1176 pinned_page = NULL; 1177 } 1178 ceph_put_cap_refs(ci, got); 1179 if (retry_op > HAVE_RETRIED && ret >= 0) { 1180 int statret; 1181 struct page *page = NULL; 1182 loff_t i_size; 1183 if (retry_op == READ_INLINE) { 1184 page = __page_cache_alloc(GFP_KERNEL); 1185 if (!page) 1186 return -ENOMEM; 1187 } 1188 1189 statret = __ceph_do_getattr(inode, page, 1190 CEPH_STAT_CAP_INLINE_DATA, !!page); 1191 if (statret < 0) { 1192 if (page) 1193 __free_page(page); 1194 if (statret == -ENODATA) { 1195 BUG_ON(retry_op != READ_INLINE); 1196 goto again; 1197 } 1198 return statret; 1199 } 1200 1201 i_size = i_size_read(inode); 1202 if (retry_op == READ_INLINE) { 1203 BUG_ON(ret > 0 || read > 0); 1204 if (iocb->ki_pos < i_size && 1205 iocb->ki_pos < PAGE_SIZE) { 1206 loff_t end = min_t(loff_t, i_size, 1207 iocb->ki_pos + len); 1208 end = min_t(loff_t, end, PAGE_SIZE); 1209 if (statret < end) 1210 zero_user_segment(page, statret, end); 1211 ret = copy_page_to_iter(page, 1212 iocb->ki_pos & ~PAGE_MASK, 1213 end - iocb->ki_pos, to); 1214 iocb->ki_pos += ret; 1215 read += ret; 1216 } 1217 if (iocb->ki_pos < i_size && read < len) { 1218 size_t zlen = min_t(size_t, len - read, 1219 i_size - iocb->ki_pos); 1220 ret = iov_iter_zero(zlen, to); 1221 iocb->ki_pos += ret; 1222 read += ret; 1223 } 1224 __free_pages(page, 0); 1225 return read; 1226 } 1227 1228 /* hit EOF or hole? */ 1229 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && 1230 ret < len) { 1231 dout("sync_read hit hole, ppos %lld < size %lld" 1232 ", reading more\n", iocb->ki_pos, i_size); 1233 1234 read += ret; 1235 len -= ret; 1236 retry_op = HAVE_RETRIED; 1237 goto again; 1238 } 1239 } 1240 1241 if (ret >= 0) 1242 ret += read; 1243 1244 return ret; 1245 } 1246 1247 /* 1248 * Take cap references to avoid releasing caps to MDS mid-write. 1249 * 1250 * If we are synchronous, and write with an old snap context, the OSD 1251 * may return EOLDSNAPC. In that case, retry the write.. _after_ 1252 * dropping our cap refs and allowing the pending snap to logically 1253 * complete _before_ this write occurs. 1254 * 1255 * If we are near ENOSPC, write synchronously. 1256 */ 1257 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) 1258 { 1259 struct file *file = iocb->ki_filp; 1260 struct ceph_file_info *fi = file->private_data; 1261 struct inode *inode = file_inode(file); 1262 struct ceph_inode_info *ci = ceph_inode(inode); 1263 struct ceph_osd_client *osdc = 1264 &ceph_sb_to_client(inode->i_sb)->client->osdc; 1265 struct ceph_cap_flush *prealloc_cf; 1266 ssize_t count, written = 0; 1267 int err, want, got; 1268 loff_t pos; 1269 1270 if (ceph_snap(inode) != CEPH_NOSNAP) 1271 return -EROFS; 1272 1273 prealloc_cf = ceph_alloc_cap_flush(); 1274 if (!prealloc_cf) 1275 return -ENOMEM; 1276 1277 inode_lock(inode); 1278 1279 /* We can write back this queue in page reclaim */ 1280 current->backing_dev_info = inode_to_bdi(inode); 1281 1282 if (iocb->ki_flags & IOCB_APPEND) { 1283 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1284 if (err < 0) 1285 goto out; 1286 } 1287 1288 err = generic_write_checks(iocb, from); 1289 if (err <= 0) 1290 goto out; 1291 1292 pos = iocb->ki_pos; 1293 count = iov_iter_count(from); 1294 err = file_remove_privs(file); 1295 if (err) 1296 goto out; 1297 1298 err = file_update_time(file); 1299 if (err) 1300 goto out; 1301 1302 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1303 err = ceph_uninline_data(file, NULL); 1304 if (err < 0) 1305 goto out; 1306 } 1307 1308 retry_snap: 1309 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) { 1310 err = -ENOSPC; 1311 goto out; 1312 } 1313 1314 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 1315 inode, ceph_vinop(inode), pos, count, i_size_read(inode)); 1316 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1317 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1318 else 1319 want = CEPH_CAP_FILE_BUFFER; 1320 got = 0; 1321 err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count, 1322 &got, NULL); 1323 if (err < 0) 1324 goto out; 1325 1326 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 1327 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 1328 1329 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 1330 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { 1331 struct ceph_snap_context *snapc; 1332 struct iov_iter data; 1333 inode_unlock(inode); 1334 1335 spin_lock(&ci->i_ceph_lock); 1336 if (__ceph_have_pending_cap_snap(ci)) { 1337 struct ceph_cap_snap *capsnap = 1338 list_last_entry(&ci->i_cap_snaps, 1339 struct ceph_cap_snap, 1340 ci_item); 1341 snapc = ceph_get_snap_context(capsnap->context); 1342 } else { 1343 BUG_ON(!ci->i_head_snapc); 1344 snapc = ceph_get_snap_context(ci->i_head_snapc); 1345 } 1346 spin_unlock(&ci->i_ceph_lock); 1347 1348 /* we might need to revert back to that point */ 1349 data = *from; 1350 if (iocb->ki_flags & IOCB_DIRECT) 1351 written = ceph_direct_read_write(iocb, &data, snapc, 1352 &prealloc_cf); 1353 else 1354 written = ceph_sync_write(iocb, &data, pos, snapc); 1355 if (written == -EOLDSNAPC) { 1356 dout("aio_write %p %llx.%llx %llu~%u" 1357 "got EOLDSNAPC, retrying\n", 1358 inode, ceph_vinop(inode), 1359 pos, (unsigned)count); 1360 inode_lock(inode); 1361 goto retry_snap; 1362 } 1363 if (written > 0) 1364 iov_iter_advance(from, written); 1365 ceph_put_snap_context(snapc); 1366 } else { 1367 /* 1368 * No need to acquire the i_truncate_mutex. Because 1369 * the MDS revokes Fwb caps before sending truncate 1370 * message to us. We can't get Fwb cap while there 1371 * are pending vmtruncate. So write and vmtruncate 1372 * can not run at the same time 1373 */ 1374 written = generic_perform_write(file, from, pos); 1375 if (likely(written >= 0)) 1376 iocb->ki_pos = pos + written; 1377 inode_unlock(inode); 1378 } 1379 1380 if (written >= 0) { 1381 int dirty; 1382 spin_lock(&ci->i_ceph_lock); 1383 ci->i_inline_version = CEPH_INLINE_NONE; 1384 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1385 &prealloc_cf); 1386 spin_unlock(&ci->i_ceph_lock); 1387 if (dirty) 1388 __mark_inode_dirty(inode, dirty); 1389 } 1390 1391 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 1392 inode, ceph_vinop(inode), pos, (unsigned)count, 1393 ceph_cap_string(got)); 1394 ceph_put_cap_refs(ci, got); 1395 1396 if (written >= 0) { 1397 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL)) 1398 iocb->ki_flags |= IOCB_DSYNC; 1399 1400 written = generic_write_sync(iocb, written); 1401 } 1402 1403 goto out_unlocked; 1404 1405 out: 1406 inode_unlock(inode); 1407 out_unlocked: 1408 ceph_free_cap_flush(prealloc_cf); 1409 current->backing_dev_info = NULL; 1410 return written ? written : err; 1411 } 1412 1413 /* 1414 * llseek. be sure to verify file size on SEEK_END. 1415 */ 1416 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) 1417 { 1418 struct inode *inode = file->f_mapping->host; 1419 loff_t i_size; 1420 loff_t ret; 1421 1422 inode_lock(inode); 1423 1424 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { 1425 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1426 if (ret < 0) 1427 goto out; 1428 } 1429 1430 i_size = i_size_read(inode); 1431 switch (whence) { 1432 case SEEK_END: 1433 offset += i_size; 1434 break; 1435 case SEEK_CUR: 1436 /* 1437 * Here we special-case the lseek(fd, 0, SEEK_CUR) 1438 * position-querying operation. Avoid rewriting the "same" 1439 * f_pos value back to the file because a concurrent read(), 1440 * write() or lseek() might have altered it 1441 */ 1442 if (offset == 0) { 1443 ret = file->f_pos; 1444 goto out; 1445 } 1446 offset += file->f_pos; 1447 break; 1448 case SEEK_DATA: 1449 if (offset >= i_size) { 1450 ret = -ENXIO; 1451 goto out; 1452 } 1453 break; 1454 case SEEK_HOLE: 1455 if (offset >= i_size) { 1456 ret = -ENXIO; 1457 goto out; 1458 } 1459 offset = i_size; 1460 break; 1461 } 1462 1463 ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 1464 1465 out: 1466 inode_unlock(inode); 1467 return ret; 1468 } 1469 1470 static inline void ceph_zero_partial_page( 1471 struct inode *inode, loff_t offset, unsigned size) 1472 { 1473 struct page *page; 1474 pgoff_t index = offset >> PAGE_SHIFT; 1475 1476 page = find_lock_page(inode->i_mapping, index); 1477 if (page) { 1478 wait_on_page_writeback(page); 1479 zero_user(page, offset & (PAGE_SIZE - 1), size); 1480 unlock_page(page); 1481 put_page(page); 1482 } 1483 } 1484 1485 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 1486 loff_t length) 1487 { 1488 loff_t nearly = round_up(offset, PAGE_SIZE); 1489 if (offset < nearly) { 1490 loff_t size = nearly - offset; 1491 if (length < size) 1492 size = length; 1493 ceph_zero_partial_page(inode, offset, size); 1494 offset += size; 1495 length -= size; 1496 } 1497 if (length >= PAGE_SIZE) { 1498 loff_t size = round_down(length, PAGE_SIZE); 1499 truncate_pagecache_range(inode, offset, offset + size - 1); 1500 offset += size; 1501 length -= size; 1502 } 1503 if (length) 1504 ceph_zero_partial_page(inode, offset, length); 1505 } 1506 1507 static int ceph_zero_partial_object(struct inode *inode, 1508 loff_t offset, loff_t *length) 1509 { 1510 struct ceph_inode_info *ci = ceph_inode(inode); 1511 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1512 struct ceph_osd_request *req; 1513 int ret = 0; 1514 loff_t zero = 0; 1515 int op; 1516 1517 if (!length) { 1518 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; 1519 length = &zero; 1520 } else { 1521 op = CEPH_OSD_OP_ZERO; 1522 } 1523 1524 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1525 ceph_vino(inode), 1526 offset, length, 1527 0, 1, op, 1528 CEPH_OSD_FLAG_WRITE, 1529 NULL, 0, 0, false); 1530 if (IS_ERR(req)) { 1531 ret = PTR_ERR(req); 1532 goto out; 1533 } 1534 1535 req->r_mtime = inode->i_mtime; 1536 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1537 if (!ret) { 1538 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1539 if (ret == -ENOENT) 1540 ret = 0; 1541 } 1542 ceph_osdc_put_request(req); 1543 1544 out: 1545 return ret; 1546 } 1547 1548 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) 1549 { 1550 int ret = 0; 1551 struct ceph_inode_info *ci = ceph_inode(inode); 1552 s32 stripe_unit = ci->i_layout.stripe_unit; 1553 s32 stripe_count = ci->i_layout.stripe_count; 1554 s32 object_size = ci->i_layout.object_size; 1555 u64 object_set_size = object_size * stripe_count; 1556 u64 nearly, t; 1557 1558 /* round offset up to next period boundary */ 1559 nearly = offset + object_set_size - 1; 1560 t = nearly; 1561 nearly -= do_div(t, object_set_size); 1562 1563 while (length && offset < nearly) { 1564 loff_t size = length; 1565 ret = ceph_zero_partial_object(inode, offset, &size); 1566 if (ret < 0) 1567 return ret; 1568 offset += size; 1569 length -= size; 1570 } 1571 while (length >= object_set_size) { 1572 int i; 1573 loff_t pos = offset; 1574 for (i = 0; i < stripe_count; ++i) { 1575 ret = ceph_zero_partial_object(inode, pos, NULL); 1576 if (ret < 0) 1577 return ret; 1578 pos += stripe_unit; 1579 } 1580 offset += object_set_size; 1581 length -= object_set_size; 1582 } 1583 while (length) { 1584 loff_t size = length; 1585 ret = ceph_zero_partial_object(inode, offset, &size); 1586 if (ret < 0) 1587 return ret; 1588 offset += size; 1589 length -= size; 1590 } 1591 return ret; 1592 } 1593 1594 static long ceph_fallocate(struct file *file, int mode, 1595 loff_t offset, loff_t length) 1596 { 1597 struct ceph_file_info *fi = file->private_data; 1598 struct inode *inode = file_inode(file); 1599 struct ceph_inode_info *ci = ceph_inode(inode); 1600 struct ceph_osd_client *osdc = 1601 &ceph_inode_to_client(inode)->client->osdc; 1602 struct ceph_cap_flush *prealloc_cf; 1603 int want, got = 0; 1604 int dirty; 1605 int ret = 0; 1606 loff_t endoff = 0; 1607 loff_t size; 1608 1609 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 1610 return -EOPNOTSUPP; 1611 1612 if (!S_ISREG(inode->i_mode)) 1613 return -EOPNOTSUPP; 1614 1615 prealloc_cf = ceph_alloc_cap_flush(); 1616 if (!prealloc_cf) 1617 return -ENOMEM; 1618 1619 inode_lock(inode); 1620 1621 if (ceph_snap(inode) != CEPH_NOSNAP) { 1622 ret = -EROFS; 1623 goto unlock; 1624 } 1625 1626 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && 1627 !(mode & FALLOC_FL_PUNCH_HOLE)) { 1628 ret = -ENOSPC; 1629 goto unlock; 1630 } 1631 1632 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1633 ret = ceph_uninline_data(file, NULL); 1634 if (ret < 0) 1635 goto unlock; 1636 } 1637 1638 size = i_size_read(inode); 1639 if (!(mode & FALLOC_FL_KEEP_SIZE)) 1640 endoff = offset + length; 1641 1642 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1643 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1644 else 1645 want = CEPH_CAP_FILE_BUFFER; 1646 1647 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL); 1648 if (ret < 0) 1649 goto unlock; 1650 1651 if (mode & FALLOC_FL_PUNCH_HOLE) { 1652 if (offset < size) 1653 ceph_zero_pagecache_range(inode, offset, length); 1654 ret = ceph_zero_objects(inode, offset, length); 1655 } else if (endoff > size) { 1656 truncate_pagecache_range(inode, size, -1); 1657 if (ceph_inode_set_size(inode, endoff)) 1658 ceph_check_caps(ceph_inode(inode), 1659 CHECK_CAPS_AUTHONLY, NULL); 1660 } 1661 1662 if (!ret) { 1663 spin_lock(&ci->i_ceph_lock); 1664 ci->i_inline_version = CEPH_INLINE_NONE; 1665 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1666 &prealloc_cf); 1667 spin_unlock(&ci->i_ceph_lock); 1668 if (dirty) 1669 __mark_inode_dirty(inode, dirty); 1670 } 1671 1672 ceph_put_cap_refs(ci, got); 1673 unlock: 1674 inode_unlock(inode); 1675 ceph_free_cap_flush(prealloc_cf); 1676 return ret; 1677 } 1678 1679 const struct file_operations ceph_file_fops = { 1680 .open = ceph_open, 1681 .release = ceph_release, 1682 .llseek = ceph_llseek, 1683 .read_iter = ceph_read_iter, 1684 .write_iter = ceph_write_iter, 1685 .mmap = ceph_mmap, 1686 .fsync = ceph_fsync, 1687 .lock = ceph_lock, 1688 .flock = ceph_flock, 1689 .splice_read = generic_file_splice_read, 1690 .splice_write = iter_file_splice_write, 1691 .unlocked_ioctl = ceph_ioctl, 1692 .compat_ioctl = ceph_ioctl, 1693 .fallocate = ceph_fallocate, 1694 }; 1695 1696