1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/module.h> 4 #include <linux/sched.h> 5 #include <linux/slab.h> 6 #include <linux/file.h> 7 #include <linux/mount.h> 8 #include <linux/namei.h> 9 #include <linux/writeback.h> 10 #include <linux/falloc.h> 11 12 #include "super.h" 13 #include "mds_client.h" 14 #include "cache.h" 15 16 /* 17 * Ceph file operations 18 * 19 * Implement basic open/close functionality, and implement 20 * read/write. 21 * 22 * We implement three modes of file I/O: 23 * - buffered uses the generic_file_aio_{read,write} helpers 24 * 25 * - synchronous is used when there is multi-client read/write 26 * sharing, avoids the page cache, and synchronously waits for an 27 * ack from the OSD. 28 * 29 * - direct io takes the variant of the sync path that references 30 * user pages directly. 31 * 32 * fsync() flushes and waits on dirty pages, but just queues metadata 33 * for writeback: since the MDS can recover size and mtime there is no 34 * need to wait for MDS acknowledgement. 35 */ 36 37 /* 38 * Calculate the length sum of direct io vectors that can 39 * be combined into one page vector. 40 */ 41 static size_t dio_get_pagev_size(const struct iov_iter *it) 42 { 43 const struct iovec *iov = it->iov; 44 const struct iovec *iovend = iov + it->nr_segs; 45 size_t size; 46 47 size = iov->iov_len - it->iov_offset; 48 /* 49 * An iov can be page vectored when both the current tail 50 * and the next base are page aligned. 51 */ 52 while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && 53 (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { 54 size += iov->iov_len; 55 } 56 dout("dio_get_pagevlen len = %zu\n", size); 57 return size; 58 } 59 60 /* 61 * Allocate a page vector based on (@it, @nbytes). 62 * The return value is the tuple describing a page vector, 63 * that is (@pages, @page_align, @num_pages). 64 */ 65 static struct page ** 66 dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, 67 size_t *page_align, int *num_pages) 68 { 69 struct iov_iter tmp_it = *it; 70 size_t align; 71 struct page **pages; 72 int ret = 0, idx, npages; 73 74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) & 75 (PAGE_SIZE - 1); 76 npages = calc_pages_for(align, nbytes); 77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); 78 if (!pages) { 79 pages = vmalloc(sizeof(*pages) * npages); 80 if (!pages) 81 return ERR_PTR(-ENOMEM); 82 } 83 84 for (idx = 0; idx < npages; ) { 85 size_t start; 86 ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, 87 npages - idx, &start); 88 if (ret < 0) 89 goto fail; 90 91 iov_iter_advance(&tmp_it, ret); 92 nbytes -= ret; 93 idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; 94 } 95 96 BUG_ON(nbytes != 0); 97 *num_pages = npages; 98 *page_align = align; 99 dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); 100 return pages; 101 fail: 102 ceph_put_page_vector(pages, idx, false); 103 return ERR_PTR(ret); 104 } 105 106 /* 107 * Prepare an open request. Preallocate ceph_cap to avoid an 108 * inopportune ENOMEM later. 109 */ 110 static struct ceph_mds_request * 111 prepare_open_request(struct super_block *sb, int flags, int create_mode) 112 { 113 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 114 struct ceph_mds_client *mdsc = fsc->mdsc; 115 struct ceph_mds_request *req; 116 int want_auth = USE_ANY_MDS; 117 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 118 119 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) 120 want_auth = USE_AUTH_MDS; 121 122 req = ceph_mdsc_create_request(mdsc, op, want_auth); 123 if (IS_ERR(req)) 124 goto out; 125 req->r_fmode = ceph_flags_to_mode(flags); 126 req->r_args.open.flags = cpu_to_le32(flags); 127 req->r_args.open.mode = cpu_to_le32(create_mode); 128 out: 129 return req; 130 } 131 132 /* 133 * initialize private struct file data. 134 * if we fail, clean up by dropping fmode reference on the ceph_inode 135 */ 136 static int ceph_init_file(struct inode *inode, struct file *file, int fmode) 137 { 138 struct ceph_file_info *cf; 139 int ret = 0; 140 struct ceph_inode_info *ci = ceph_inode(inode); 141 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 142 struct ceph_mds_client *mdsc = fsc->mdsc; 143 144 switch (inode->i_mode & S_IFMT) { 145 case S_IFREG: 146 /* First file open request creates the cookie, we want to keep 147 * this cookie around for the filetime of the inode as not to 148 * have to worry about fscache register / revoke / operation 149 * races. 150 * 151 * Also, if we know the operation is going to invalidate data 152 * (non readonly) just nuke the cache right away. 153 */ 154 ceph_fscache_register_inode_cookie(mdsc->fsc, ci); 155 if ((fmode & CEPH_FILE_MODE_WR)) 156 ceph_fscache_invalidate(inode); 157 case S_IFDIR: 158 dout("init_file %p %p 0%o (regular)\n", inode, file, 159 inode->i_mode); 160 cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO); 161 if (cf == NULL) { 162 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 163 return -ENOMEM; 164 } 165 cf->fmode = fmode; 166 cf->next_offset = 2; 167 cf->readdir_cache_idx = -1; 168 file->private_data = cf; 169 BUG_ON(inode->i_fop->release != ceph_release); 170 break; 171 172 case S_IFLNK: 173 dout("init_file %p %p 0%o (symlink)\n", inode, file, 174 inode->i_mode); 175 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 176 break; 177 178 default: 179 dout("init_file %p %p 0%o (special)\n", inode, file, 180 inode->i_mode); 181 /* 182 * we need to drop the open ref now, since we don't 183 * have .release set to ceph_release. 184 */ 185 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 186 BUG_ON(inode->i_fop->release == ceph_release); 187 188 /* call the proper open fop */ 189 ret = inode->i_fop->open(inode, file); 190 } 191 return ret; 192 } 193 194 /* 195 * If we already have the requisite capabilities, we can satisfy 196 * the open request locally (no need to request new caps from the 197 * MDS). We do, however, need to inform the MDS (asynchronously) 198 * if our wanted caps set expands. 199 */ 200 int ceph_open(struct inode *inode, struct file *file) 201 { 202 struct ceph_inode_info *ci = ceph_inode(inode); 203 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 204 struct ceph_mds_client *mdsc = fsc->mdsc; 205 struct ceph_mds_request *req; 206 struct ceph_file_info *cf = file->private_data; 207 int err; 208 int flags, fmode, wanted; 209 210 if (cf) { 211 dout("open file %p is already opened\n", file); 212 return 0; 213 } 214 215 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ 216 flags = file->f_flags & ~(O_CREAT|O_EXCL); 217 if (S_ISDIR(inode->i_mode)) 218 flags = O_DIRECTORY; /* mds likes to know */ 219 220 dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, 221 ceph_vinop(inode), file, flags, file->f_flags); 222 fmode = ceph_flags_to_mode(flags); 223 wanted = ceph_caps_for_mode(fmode); 224 225 /* snapped files are read-only */ 226 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) 227 return -EROFS; 228 229 /* trivially open snapdir */ 230 if (ceph_snap(inode) == CEPH_SNAPDIR) { 231 spin_lock(&ci->i_ceph_lock); 232 __ceph_get_fmode(ci, fmode); 233 spin_unlock(&ci->i_ceph_lock); 234 return ceph_init_file(inode, file, fmode); 235 } 236 237 /* 238 * No need to block if we have caps on the auth MDS (for 239 * write) or any MDS (for read). Update wanted set 240 * asynchronously. 241 */ 242 spin_lock(&ci->i_ceph_lock); 243 if (__ceph_is_any_real_caps(ci) && 244 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { 245 int mds_wanted = __ceph_caps_mds_wanted(ci); 246 int issued = __ceph_caps_issued(ci, NULL); 247 248 dout("open %p fmode %d want %s issued %s using existing\n", 249 inode, fmode, ceph_cap_string(wanted), 250 ceph_cap_string(issued)); 251 __ceph_get_fmode(ci, fmode); 252 spin_unlock(&ci->i_ceph_lock); 253 254 /* adjust wanted? */ 255 if ((issued & wanted) != wanted && 256 (mds_wanted & wanted) != wanted && 257 ceph_snap(inode) != CEPH_SNAPDIR) 258 ceph_check_caps(ci, 0, NULL); 259 260 return ceph_init_file(inode, file, fmode); 261 } else if (ceph_snap(inode) != CEPH_NOSNAP && 262 (ci->i_snap_caps & wanted) == wanted) { 263 __ceph_get_fmode(ci, fmode); 264 spin_unlock(&ci->i_ceph_lock); 265 return ceph_init_file(inode, file, fmode); 266 } 267 268 spin_unlock(&ci->i_ceph_lock); 269 270 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 271 req = prepare_open_request(inode->i_sb, flags, 0); 272 if (IS_ERR(req)) { 273 err = PTR_ERR(req); 274 goto out; 275 } 276 req->r_inode = inode; 277 ihold(inode); 278 279 req->r_num_caps = 1; 280 err = ceph_mdsc_do_request(mdsc, NULL, req); 281 if (!err) 282 err = ceph_init_file(inode, file, req->r_fmode); 283 ceph_mdsc_put_request(req); 284 dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 285 out: 286 return err; 287 } 288 289 290 /* 291 * Do a lookup + open with a single request. If we get a non-existent 292 * file or symlink, return 1 so the VFS can retry. 293 */ 294 int ceph_atomic_open(struct inode *dir, struct dentry *dentry, 295 struct file *file, unsigned flags, umode_t mode, 296 int *opened) 297 { 298 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 299 struct ceph_mds_client *mdsc = fsc->mdsc; 300 struct ceph_mds_request *req; 301 struct dentry *dn; 302 struct ceph_acls_info acls = {}; 303 int err; 304 305 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", 306 dir, dentry, dentry, 307 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); 308 309 if (dentry->d_name.len > NAME_MAX) 310 return -ENAMETOOLONG; 311 312 err = ceph_init_dentry(dentry); 313 if (err < 0) 314 return err; 315 316 if (flags & O_CREAT) { 317 err = ceph_pre_init_acls(dir, &mode, &acls); 318 if (err < 0) 319 return err; 320 } 321 322 /* do the open */ 323 req = prepare_open_request(dir->i_sb, flags, mode); 324 if (IS_ERR(req)) { 325 err = PTR_ERR(req); 326 goto out_acl; 327 } 328 req->r_dentry = dget(dentry); 329 req->r_num_caps = 2; 330 if (flags & O_CREAT) { 331 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 332 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 333 if (acls.pagelist) { 334 req->r_pagelist = acls.pagelist; 335 acls.pagelist = NULL; 336 } 337 } 338 req->r_locked_dir = dir; /* caller holds dir->i_mutex */ 339 err = ceph_mdsc_do_request(mdsc, 340 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, 341 req); 342 err = ceph_handle_snapdir(req, dentry, err); 343 if (err) 344 goto out_req; 345 346 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) 347 err = ceph_handle_notrace_create(dir, dentry); 348 349 if (d_unhashed(dentry)) { 350 dn = ceph_finish_lookup(req, dentry, err); 351 if (IS_ERR(dn)) 352 err = PTR_ERR(dn); 353 } else { 354 /* we were given a hashed negative dentry */ 355 dn = NULL; 356 } 357 if (err) 358 goto out_req; 359 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { 360 /* make vfs retry on splice, ENOENT, or symlink */ 361 dout("atomic_open finish_no_open on dn %p\n", dn); 362 err = finish_no_open(file, dn); 363 } else { 364 dout("atomic_open finish_open on dn %p\n", dn); 365 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 366 ceph_init_inode_acls(d_inode(dentry), &acls); 367 *opened |= FILE_CREATED; 368 } 369 err = finish_open(file, dentry, ceph_open, opened); 370 } 371 out_req: 372 if (!req->r_err && req->r_target_inode) 373 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); 374 ceph_mdsc_put_request(req); 375 out_acl: 376 ceph_release_acls_info(&acls); 377 dout("atomic_open result=%d\n", err); 378 return err; 379 } 380 381 int ceph_release(struct inode *inode, struct file *file) 382 { 383 struct ceph_inode_info *ci = ceph_inode(inode); 384 struct ceph_file_info *cf = file->private_data; 385 386 dout("release inode %p file %p\n", inode, file); 387 ceph_put_fmode(ci, cf->fmode); 388 if (cf->last_readdir) 389 ceph_mdsc_put_request(cf->last_readdir); 390 kfree(cf->last_name); 391 kfree(cf->dir_info); 392 kmem_cache_free(ceph_file_cachep, cf); 393 394 /* wake up anyone waiting for caps on this inode */ 395 wake_up_all(&ci->i_cap_wq); 396 return 0; 397 } 398 399 enum { 400 HAVE_RETRIED = 1, 401 CHECK_EOF = 2, 402 READ_INLINE = 3, 403 }; 404 405 /* 406 * Read a range of bytes striped over one or more objects. Iterate over 407 * objects we stripe over. (That's not atomic, but good enough for now.) 408 * 409 * If we get a short result from the OSD, check against i_size; we need to 410 * only return a short read to the caller if we hit EOF. 411 */ 412 static int striped_read(struct inode *inode, 413 u64 off, u64 len, 414 struct page **pages, int num_pages, 415 int *checkeof) 416 { 417 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 418 struct ceph_inode_info *ci = ceph_inode(inode); 419 u64 pos, this_len, left; 420 loff_t i_size; 421 int page_align, pages_left; 422 int read, ret; 423 struct page **page_pos; 424 bool hit_stripe, was_short; 425 426 /* 427 * we may need to do multiple reads. not atomic, unfortunately. 428 */ 429 pos = off; 430 left = len; 431 page_pos = pages; 432 pages_left = num_pages; 433 read = 0; 434 435 more: 436 page_align = pos & ~PAGE_MASK; 437 this_len = left; 438 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), 439 &ci->i_layout, pos, &this_len, 440 ci->i_truncate_seq, 441 ci->i_truncate_size, 442 page_pos, pages_left, page_align); 443 if (ret == -ENOENT) 444 ret = 0; 445 hit_stripe = this_len < left; 446 was_short = ret >= 0 && ret < this_len; 447 dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read, 448 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); 449 450 i_size = i_size_read(inode); 451 if (ret >= 0) { 452 int didpages; 453 if (was_short && (pos + ret < i_size)) { 454 int zlen = min(this_len - ret, i_size - pos - ret); 455 int zoff = (off & ~PAGE_MASK) + read + ret; 456 dout(" zero gap %llu to %llu\n", 457 pos + ret, pos + ret + zlen); 458 ceph_zero_page_vector_range(zoff, zlen, pages); 459 ret += zlen; 460 } 461 462 didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; 463 pos += ret; 464 read = pos - off; 465 left -= ret; 466 page_pos += didpages; 467 pages_left -= didpages; 468 469 /* hit stripe and need continue*/ 470 if (left && hit_stripe && pos < i_size) 471 goto more; 472 } 473 474 if (read > 0) { 475 ret = read; 476 /* did we bounce off eof? */ 477 if (pos + left > i_size) 478 *checkeof = CHECK_EOF; 479 } 480 481 dout("striped_read returns %d\n", ret); 482 return ret; 483 } 484 485 /* 486 * Completely synchronous read and write methods. Direct from __user 487 * buffer to osd, or directly to user pages (if O_DIRECT). 488 * 489 * If the read spans object boundary, just do multiple reads. 490 */ 491 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i, 492 int *checkeof) 493 { 494 struct file *file = iocb->ki_filp; 495 struct inode *inode = file_inode(file); 496 struct page **pages; 497 u64 off = iocb->ki_pos; 498 int num_pages, ret; 499 size_t len = iov_iter_count(i); 500 501 dout("sync_read on file %p %llu~%u %s\n", file, off, 502 (unsigned)len, 503 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 504 505 if (!len) 506 return 0; 507 /* 508 * flush any page cache pages in this range. this 509 * will make concurrent normal and sync io slow, 510 * but it will at least behave sensibly when they are 511 * in sequence. 512 */ 513 ret = filemap_write_and_wait_range(inode->i_mapping, off, 514 off + len); 515 if (ret < 0) 516 return ret; 517 518 num_pages = calc_pages_for(off, len); 519 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 520 if (IS_ERR(pages)) 521 return PTR_ERR(pages); 522 ret = striped_read(inode, off, len, pages, 523 num_pages, checkeof); 524 if (ret > 0) { 525 int l, k = 0; 526 size_t left = ret; 527 528 while (left) { 529 size_t page_off = off & ~PAGE_MASK; 530 size_t copy = min_t(size_t, left, 531 PAGE_SIZE - page_off); 532 l = copy_page_to_iter(pages[k++], page_off, copy, i); 533 off += l; 534 left -= l; 535 if (l < copy) 536 break; 537 } 538 } 539 ceph_release_page_vector(pages, num_pages); 540 541 if (off > iocb->ki_pos) { 542 ret = off - iocb->ki_pos; 543 iocb->ki_pos = off; 544 } 545 546 dout("sync_read result %d\n", ret); 547 return ret; 548 } 549 550 struct ceph_aio_request { 551 struct kiocb *iocb; 552 size_t total_len; 553 int write; 554 int error; 555 struct list_head osd_reqs; 556 unsigned num_reqs; 557 atomic_t pending_reqs; 558 struct timespec mtime; 559 struct ceph_cap_flush *prealloc_cf; 560 }; 561 562 struct ceph_aio_work { 563 struct work_struct work; 564 struct ceph_osd_request *req; 565 }; 566 567 static void ceph_aio_retry_work(struct work_struct *work); 568 569 static void ceph_aio_complete(struct inode *inode, 570 struct ceph_aio_request *aio_req) 571 { 572 struct ceph_inode_info *ci = ceph_inode(inode); 573 int ret; 574 575 if (!atomic_dec_and_test(&aio_req->pending_reqs)) 576 return; 577 578 ret = aio_req->error; 579 if (!ret) 580 ret = aio_req->total_len; 581 582 dout("ceph_aio_complete %p rc %d\n", inode, ret); 583 584 if (ret >= 0 && aio_req->write) { 585 int dirty; 586 587 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len; 588 if (endoff > i_size_read(inode)) { 589 if (ceph_inode_set_size(inode, endoff)) 590 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 591 } 592 593 spin_lock(&ci->i_ceph_lock); 594 ci->i_inline_version = CEPH_INLINE_NONE; 595 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 596 &aio_req->prealloc_cf); 597 spin_unlock(&ci->i_ceph_lock); 598 if (dirty) 599 __mark_inode_dirty(inode, dirty); 600 601 } 602 603 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : 604 CEPH_CAP_FILE_RD)); 605 606 aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); 607 608 ceph_free_cap_flush(aio_req->prealloc_cf); 609 kfree(aio_req); 610 } 611 612 static void ceph_aio_complete_req(struct ceph_osd_request *req, 613 struct ceph_msg *msg) 614 { 615 int rc = req->r_result; 616 struct inode *inode = req->r_inode; 617 struct ceph_aio_request *aio_req = req->r_priv; 618 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 619 int num_pages = calc_pages_for((u64)osd_data->alignment, 620 osd_data->length); 621 622 dout("ceph_aio_complete_req %p rc %d bytes %llu\n", 623 inode, rc, osd_data->length); 624 625 if (rc == -EOLDSNAPC) { 626 struct ceph_aio_work *aio_work; 627 BUG_ON(!aio_req->write); 628 629 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); 630 if (aio_work) { 631 INIT_WORK(&aio_work->work, ceph_aio_retry_work); 632 aio_work->req = req; 633 queue_work(ceph_inode_to_client(inode)->wb_wq, 634 &aio_work->work); 635 return; 636 } 637 rc = -ENOMEM; 638 } else if (!aio_req->write) { 639 if (rc == -ENOENT) 640 rc = 0; 641 if (rc >= 0 && osd_data->length > rc) { 642 int zoff = osd_data->alignment + rc; 643 int zlen = osd_data->length - rc; 644 /* 645 * If read is satisfied by single OSD request, 646 * it can pass EOF. Otherwise read is within 647 * i_size. 648 */ 649 if (aio_req->num_reqs == 1) { 650 loff_t i_size = i_size_read(inode); 651 loff_t endoff = aio_req->iocb->ki_pos + rc; 652 if (endoff < i_size) 653 zlen = min_t(size_t, zlen, 654 i_size - endoff); 655 aio_req->total_len = rc + zlen; 656 } 657 658 if (zlen > 0) 659 ceph_zero_page_vector_range(zoff, zlen, 660 osd_data->pages); 661 } 662 } 663 664 ceph_put_page_vector(osd_data->pages, num_pages, false); 665 ceph_osdc_put_request(req); 666 667 if (rc < 0) 668 cmpxchg(&aio_req->error, 0, rc); 669 670 ceph_aio_complete(inode, aio_req); 671 return; 672 } 673 674 static void ceph_aio_retry_work(struct work_struct *work) 675 { 676 struct ceph_aio_work *aio_work = 677 container_of(work, struct ceph_aio_work, work); 678 struct ceph_osd_request *orig_req = aio_work->req; 679 struct ceph_aio_request *aio_req = orig_req->r_priv; 680 struct inode *inode = orig_req->r_inode; 681 struct ceph_inode_info *ci = ceph_inode(inode); 682 struct ceph_snap_context *snapc; 683 struct ceph_osd_request *req; 684 int ret; 685 686 spin_lock(&ci->i_ceph_lock); 687 if (__ceph_have_pending_cap_snap(ci)) { 688 struct ceph_cap_snap *capsnap = 689 list_last_entry(&ci->i_cap_snaps, 690 struct ceph_cap_snap, 691 ci_item); 692 snapc = ceph_get_snap_context(capsnap->context); 693 } else { 694 BUG_ON(!ci->i_head_snapc); 695 snapc = ceph_get_snap_context(ci->i_head_snapc); 696 } 697 spin_unlock(&ci->i_ceph_lock); 698 699 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, 700 false, GFP_NOFS); 701 if (!req) { 702 ret = -ENOMEM; 703 req = orig_req; 704 goto out; 705 } 706 707 req->r_flags = CEPH_OSD_FLAG_ORDERSNAP | 708 CEPH_OSD_FLAG_ONDISK | 709 CEPH_OSD_FLAG_WRITE; 710 req->r_base_oloc = orig_req->r_base_oloc; 711 req->r_base_oid = orig_req->r_base_oid; 712 713 req->r_ops[0] = orig_req->r_ops[0]; 714 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 715 716 ceph_osdc_build_request(req, req->r_ops[0].extent.offset, 717 snapc, CEPH_NOSNAP, &aio_req->mtime); 718 719 ceph_osdc_put_request(orig_req); 720 721 req->r_callback = ceph_aio_complete_req; 722 req->r_inode = inode; 723 req->r_priv = aio_req; 724 725 ret = ceph_osdc_start_request(req->r_osdc, req, false); 726 out: 727 if (ret < 0) { 728 BUG_ON(ret == -EOLDSNAPC); 729 req->r_result = ret; 730 ceph_aio_complete_req(req, NULL); 731 } 732 733 ceph_put_snap_context(snapc); 734 kfree(aio_work); 735 } 736 737 /* 738 * Write commit request unsafe callback, called to tell us when a 739 * request is unsafe (that is, in flight--has been handed to the 740 * messenger to send to its target osd). It is called again when 741 * we've received a response message indicating the request is 742 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request 743 * is completed early (and unsuccessfully) due to a timeout or 744 * interrupt. 745 * 746 * This is used if we requested both an ACK and ONDISK commit reply 747 * from the OSD. 748 */ 749 static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) 750 { 751 struct ceph_inode_info *ci = ceph_inode(req->r_inode); 752 753 dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid, 754 unsafe ? "un" : ""); 755 if (unsafe) { 756 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); 757 spin_lock(&ci->i_unsafe_lock); 758 list_add_tail(&req->r_unsafe_item, 759 &ci->i_unsafe_writes); 760 spin_unlock(&ci->i_unsafe_lock); 761 } else { 762 spin_lock(&ci->i_unsafe_lock); 763 list_del_init(&req->r_unsafe_item); 764 spin_unlock(&ci->i_unsafe_lock); 765 ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); 766 } 767 } 768 769 770 static ssize_t 771 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, 772 struct ceph_snap_context *snapc, 773 struct ceph_cap_flush **pcf) 774 { 775 struct file *file = iocb->ki_filp; 776 struct inode *inode = file_inode(file); 777 struct ceph_inode_info *ci = ceph_inode(inode); 778 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 779 struct ceph_vino vino; 780 struct ceph_osd_request *req; 781 struct page **pages; 782 struct ceph_aio_request *aio_req = NULL; 783 int num_pages = 0; 784 int flags; 785 int ret; 786 struct timespec mtime = CURRENT_TIME; 787 size_t count = iov_iter_count(iter); 788 loff_t pos = iocb->ki_pos; 789 bool write = iov_iter_rw(iter) == WRITE; 790 791 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 792 return -EROFS; 793 794 dout("sync_direct_read_write (%s) on file %p %lld~%u\n", 795 (write ? "write" : "read"), file, pos, (unsigned)count); 796 797 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 798 if (ret < 0) 799 return ret; 800 801 if (write) { 802 ret = invalidate_inode_pages2_range(inode->i_mapping, 803 pos >> PAGE_CACHE_SHIFT, 804 (pos + count) >> PAGE_CACHE_SHIFT); 805 if (ret < 0) 806 dout("invalidate_inode_pages2_range returned %d\n", ret); 807 808 flags = CEPH_OSD_FLAG_ORDERSNAP | 809 CEPH_OSD_FLAG_ONDISK | 810 CEPH_OSD_FLAG_WRITE; 811 } else { 812 flags = CEPH_OSD_FLAG_READ; 813 } 814 815 while (iov_iter_count(iter) > 0) { 816 u64 size = dio_get_pagev_size(iter); 817 size_t start = 0; 818 ssize_t len; 819 820 vino = ceph_vino(inode); 821 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 822 vino, pos, &size, 0, 823 /*include a 'startsync' command*/ 824 write ? 2 : 1, 825 write ? CEPH_OSD_OP_WRITE : 826 CEPH_OSD_OP_READ, 827 flags, snapc, 828 ci->i_truncate_seq, 829 ci->i_truncate_size, 830 false); 831 if (IS_ERR(req)) { 832 ret = PTR_ERR(req); 833 break; 834 } 835 836 len = size; 837 pages = dio_get_pages_alloc(iter, len, &start, &num_pages); 838 if (IS_ERR(pages)) { 839 ceph_osdc_put_request(req); 840 ret = PTR_ERR(pages); 841 break; 842 } 843 844 /* 845 * To simplify error handling, allow AIO when IO within i_size 846 * or IO can be satisfied by single OSD request. 847 */ 848 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && 849 (len == count || pos + count <= i_size_read(inode))) { 850 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); 851 if (aio_req) { 852 aio_req->iocb = iocb; 853 aio_req->write = write; 854 INIT_LIST_HEAD(&aio_req->osd_reqs); 855 if (write) { 856 aio_req->mtime = mtime; 857 swap(aio_req->prealloc_cf, *pcf); 858 } 859 } 860 /* ignore error */ 861 } 862 863 if (write) { 864 /* 865 * throw out any page cache pages in this range. this 866 * may block. 867 */ 868 truncate_inode_pages_range(inode->i_mapping, pos, 869 (pos+len) | (PAGE_CACHE_SIZE - 1)); 870 871 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); 872 } 873 874 875 osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, 876 false, false); 877 878 ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); 879 880 if (aio_req) { 881 aio_req->total_len += len; 882 aio_req->num_reqs++; 883 atomic_inc(&aio_req->pending_reqs); 884 885 req->r_callback = ceph_aio_complete_req; 886 req->r_inode = inode; 887 req->r_priv = aio_req; 888 list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); 889 890 pos += len; 891 iov_iter_advance(iter, len); 892 continue; 893 } 894 895 ret = ceph_osdc_start_request(req->r_osdc, req, false); 896 if (!ret) 897 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 898 899 size = i_size_read(inode); 900 if (!write) { 901 if (ret == -ENOENT) 902 ret = 0; 903 if (ret >= 0 && ret < len && pos + ret < size) { 904 int zlen = min_t(size_t, len - ret, 905 size - pos - ret); 906 ceph_zero_page_vector_range(start + ret, zlen, 907 pages); 908 ret += zlen; 909 } 910 if (ret >= 0) 911 len = ret; 912 } 913 914 ceph_put_page_vector(pages, num_pages, false); 915 916 ceph_osdc_put_request(req); 917 if (ret < 0) 918 break; 919 920 pos += len; 921 iov_iter_advance(iter, len); 922 923 if (!write && pos >= size) 924 break; 925 926 if (write && pos > size) { 927 if (ceph_inode_set_size(inode, pos)) 928 ceph_check_caps(ceph_inode(inode), 929 CHECK_CAPS_AUTHONLY, 930 NULL); 931 } 932 } 933 934 if (aio_req) { 935 if (aio_req->num_reqs == 0) { 936 kfree(aio_req); 937 return ret; 938 } 939 940 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : 941 CEPH_CAP_FILE_RD); 942 943 while (!list_empty(&aio_req->osd_reqs)) { 944 req = list_first_entry(&aio_req->osd_reqs, 945 struct ceph_osd_request, 946 r_unsafe_item); 947 list_del_init(&req->r_unsafe_item); 948 if (ret >= 0) 949 ret = ceph_osdc_start_request(req->r_osdc, 950 req, false); 951 if (ret < 0) { 952 BUG_ON(ret == -EOLDSNAPC); 953 req->r_result = ret; 954 ceph_aio_complete_req(req, NULL); 955 } 956 } 957 return -EIOCBQUEUED; 958 } 959 960 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { 961 ret = pos - iocb->ki_pos; 962 iocb->ki_pos = pos; 963 } 964 return ret; 965 } 966 967 /* 968 * Synchronous write, straight from __user pointer or user pages. 969 * 970 * If write spans object boundary, just do multiple writes. (For a 971 * correct atomic write, we should e.g. take write locks on all 972 * objects, rollback on failure, etc.) 973 */ 974 static ssize_t 975 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, 976 struct ceph_snap_context *snapc) 977 { 978 struct file *file = iocb->ki_filp; 979 struct inode *inode = file_inode(file); 980 struct ceph_inode_info *ci = ceph_inode(inode); 981 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 982 struct ceph_vino vino; 983 struct ceph_osd_request *req; 984 struct page **pages; 985 u64 len; 986 int num_pages; 987 int written = 0; 988 int flags; 989 int check_caps = 0; 990 int ret; 991 struct timespec mtime = CURRENT_TIME; 992 size_t count = iov_iter_count(from); 993 994 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 995 return -EROFS; 996 997 dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count); 998 999 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 1000 if (ret < 0) 1001 return ret; 1002 1003 ret = invalidate_inode_pages2_range(inode->i_mapping, 1004 pos >> PAGE_CACHE_SHIFT, 1005 (pos + count) >> PAGE_CACHE_SHIFT); 1006 if (ret < 0) 1007 dout("invalidate_inode_pages2_range returned %d\n", ret); 1008 1009 flags = CEPH_OSD_FLAG_ORDERSNAP | 1010 CEPH_OSD_FLAG_ONDISK | 1011 CEPH_OSD_FLAG_WRITE | 1012 CEPH_OSD_FLAG_ACK; 1013 1014 while ((len = iov_iter_count(from)) > 0) { 1015 size_t left; 1016 int n; 1017 1018 vino = ceph_vino(inode); 1019 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1020 vino, pos, &len, 0, 1, 1021 CEPH_OSD_OP_WRITE, flags, snapc, 1022 ci->i_truncate_seq, 1023 ci->i_truncate_size, 1024 false); 1025 if (IS_ERR(req)) { 1026 ret = PTR_ERR(req); 1027 break; 1028 } 1029 1030 /* 1031 * write from beginning of first page, 1032 * regardless of io alignment 1033 */ 1034 num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1035 1036 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1037 if (IS_ERR(pages)) { 1038 ret = PTR_ERR(pages); 1039 goto out; 1040 } 1041 1042 left = len; 1043 for (n = 0; n < num_pages; n++) { 1044 size_t plen = min_t(size_t, left, PAGE_SIZE); 1045 ret = copy_page_from_iter(pages[n], 0, plen, from); 1046 if (ret != plen) { 1047 ret = -EFAULT; 1048 break; 1049 } 1050 left -= ret; 1051 } 1052 1053 if (ret < 0) { 1054 ceph_release_page_vector(pages, num_pages); 1055 goto out; 1056 } 1057 1058 /* get a second commit callback */ 1059 req->r_unsafe_callback = ceph_sync_write_unsafe; 1060 req->r_inode = inode; 1061 1062 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, 1063 false, true); 1064 1065 /* BUG_ON(vino.snap != CEPH_NOSNAP); */ 1066 ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); 1067 1068 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1069 if (!ret) 1070 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1071 1072 out: 1073 ceph_osdc_put_request(req); 1074 if (ret == 0) { 1075 pos += len; 1076 written += len; 1077 1078 if (pos > i_size_read(inode)) { 1079 check_caps = ceph_inode_set_size(inode, pos); 1080 if (check_caps) 1081 ceph_check_caps(ceph_inode(inode), 1082 CHECK_CAPS_AUTHONLY, 1083 NULL); 1084 } 1085 } else 1086 break; 1087 } 1088 1089 if (ret != -EOLDSNAPC && written > 0) { 1090 ret = written; 1091 iocb->ki_pos = pos; 1092 } 1093 return ret; 1094 } 1095 1096 /* 1097 * Wrap generic_file_aio_read with checks for cap bits on the inode. 1098 * Atomically grab references, so that those bits are not released 1099 * back to the MDS mid-read. 1100 * 1101 * Hmm, the sync read case isn't actually async... should it be? 1102 */ 1103 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) 1104 { 1105 struct file *filp = iocb->ki_filp; 1106 struct ceph_file_info *fi = filp->private_data; 1107 size_t len = iov_iter_count(to); 1108 struct inode *inode = file_inode(filp); 1109 struct ceph_inode_info *ci = ceph_inode(inode); 1110 struct page *pinned_page = NULL; 1111 ssize_t ret; 1112 int want, got = 0; 1113 int retry_op = 0, read = 0; 1114 1115 again: 1116 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 1117 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); 1118 1119 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1120 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1121 else 1122 want = CEPH_CAP_FILE_CACHE; 1123 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); 1124 if (ret < 0) 1125 return ret; 1126 1127 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || 1128 (iocb->ki_flags & IOCB_DIRECT) || 1129 (fi->flags & CEPH_F_SYNC)) { 1130 1131 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1132 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1133 ceph_cap_string(got)); 1134 1135 if (ci->i_inline_version == CEPH_INLINE_NONE) { 1136 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { 1137 ret = ceph_direct_read_write(iocb, to, 1138 NULL, NULL); 1139 if (ret >= 0 && ret < len) 1140 retry_op = CHECK_EOF; 1141 } else { 1142 ret = ceph_sync_read(iocb, to, &retry_op); 1143 } 1144 } else { 1145 retry_op = READ_INLINE; 1146 } 1147 } else { 1148 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1149 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1150 ceph_cap_string(got)); 1151 1152 ret = generic_file_read_iter(iocb, to); 1153 } 1154 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1155 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1156 if (pinned_page) { 1157 page_cache_release(pinned_page); 1158 pinned_page = NULL; 1159 } 1160 ceph_put_cap_refs(ci, got); 1161 if (retry_op > HAVE_RETRIED && ret >= 0) { 1162 int statret; 1163 struct page *page = NULL; 1164 loff_t i_size; 1165 if (retry_op == READ_INLINE) { 1166 page = __page_cache_alloc(GFP_KERNEL); 1167 if (!page) 1168 return -ENOMEM; 1169 } 1170 1171 statret = __ceph_do_getattr(inode, page, 1172 CEPH_STAT_CAP_INLINE_DATA, !!page); 1173 if (statret < 0) { 1174 __free_page(page); 1175 if (statret == -ENODATA) { 1176 BUG_ON(retry_op != READ_INLINE); 1177 goto again; 1178 } 1179 return statret; 1180 } 1181 1182 i_size = i_size_read(inode); 1183 if (retry_op == READ_INLINE) { 1184 BUG_ON(ret > 0 || read > 0); 1185 if (iocb->ki_pos < i_size && 1186 iocb->ki_pos < PAGE_CACHE_SIZE) { 1187 loff_t end = min_t(loff_t, i_size, 1188 iocb->ki_pos + len); 1189 end = min_t(loff_t, end, PAGE_CACHE_SIZE); 1190 if (statret < end) 1191 zero_user_segment(page, statret, end); 1192 ret = copy_page_to_iter(page, 1193 iocb->ki_pos & ~PAGE_MASK, 1194 end - iocb->ki_pos, to); 1195 iocb->ki_pos += ret; 1196 read += ret; 1197 } 1198 if (iocb->ki_pos < i_size && read < len) { 1199 size_t zlen = min_t(size_t, len - read, 1200 i_size - iocb->ki_pos); 1201 ret = iov_iter_zero(zlen, to); 1202 iocb->ki_pos += ret; 1203 read += ret; 1204 } 1205 __free_pages(page, 0); 1206 return read; 1207 } 1208 1209 /* hit EOF or hole? */ 1210 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && 1211 ret < len) { 1212 dout("sync_read hit hole, ppos %lld < size %lld" 1213 ", reading more\n", iocb->ki_pos, i_size); 1214 1215 read += ret; 1216 len -= ret; 1217 retry_op = HAVE_RETRIED; 1218 goto again; 1219 } 1220 } 1221 1222 if (ret >= 0) 1223 ret += read; 1224 1225 return ret; 1226 } 1227 1228 /* 1229 * Take cap references to avoid releasing caps to MDS mid-write. 1230 * 1231 * If we are synchronous, and write with an old snap context, the OSD 1232 * may return EOLDSNAPC. In that case, retry the write.. _after_ 1233 * dropping our cap refs and allowing the pending snap to logically 1234 * complete _before_ this write occurs. 1235 * 1236 * If we are near ENOSPC, write synchronously. 1237 */ 1238 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) 1239 { 1240 struct file *file = iocb->ki_filp; 1241 struct ceph_file_info *fi = file->private_data; 1242 struct inode *inode = file_inode(file); 1243 struct ceph_inode_info *ci = ceph_inode(inode); 1244 struct ceph_osd_client *osdc = 1245 &ceph_sb_to_client(inode->i_sb)->client->osdc; 1246 struct ceph_cap_flush *prealloc_cf; 1247 ssize_t count, written = 0; 1248 int err, want, got; 1249 loff_t pos; 1250 1251 if (ceph_snap(inode) != CEPH_NOSNAP) 1252 return -EROFS; 1253 1254 prealloc_cf = ceph_alloc_cap_flush(); 1255 if (!prealloc_cf) 1256 return -ENOMEM; 1257 1258 inode_lock(inode); 1259 1260 /* We can write back this queue in page reclaim */ 1261 current->backing_dev_info = inode_to_bdi(inode); 1262 1263 if (iocb->ki_flags & IOCB_APPEND) { 1264 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1265 if (err < 0) 1266 goto out; 1267 } 1268 1269 err = generic_write_checks(iocb, from); 1270 if (err <= 0) 1271 goto out; 1272 1273 pos = iocb->ki_pos; 1274 count = iov_iter_count(from); 1275 err = file_remove_privs(file); 1276 if (err) 1277 goto out; 1278 1279 err = file_update_time(file); 1280 if (err) 1281 goto out; 1282 1283 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1284 err = ceph_uninline_data(file, NULL); 1285 if (err < 0) 1286 goto out; 1287 } 1288 1289 retry_snap: 1290 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { 1291 err = -ENOSPC; 1292 goto out; 1293 } 1294 1295 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 1296 inode, ceph_vinop(inode), pos, count, i_size_read(inode)); 1297 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1298 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1299 else 1300 want = CEPH_CAP_FILE_BUFFER; 1301 got = 0; 1302 err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count, 1303 &got, NULL); 1304 if (err < 0) 1305 goto out; 1306 1307 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 1308 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 1309 1310 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 1311 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { 1312 struct ceph_snap_context *snapc; 1313 struct iov_iter data; 1314 inode_unlock(inode); 1315 1316 spin_lock(&ci->i_ceph_lock); 1317 if (__ceph_have_pending_cap_snap(ci)) { 1318 struct ceph_cap_snap *capsnap = 1319 list_last_entry(&ci->i_cap_snaps, 1320 struct ceph_cap_snap, 1321 ci_item); 1322 snapc = ceph_get_snap_context(capsnap->context); 1323 } else { 1324 BUG_ON(!ci->i_head_snapc); 1325 snapc = ceph_get_snap_context(ci->i_head_snapc); 1326 } 1327 spin_unlock(&ci->i_ceph_lock); 1328 1329 /* we might need to revert back to that point */ 1330 data = *from; 1331 if (iocb->ki_flags & IOCB_DIRECT) 1332 written = ceph_direct_read_write(iocb, &data, snapc, 1333 &prealloc_cf); 1334 else 1335 written = ceph_sync_write(iocb, &data, pos, snapc); 1336 if (written == -EOLDSNAPC) { 1337 dout("aio_write %p %llx.%llx %llu~%u" 1338 "got EOLDSNAPC, retrying\n", 1339 inode, ceph_vinop(inode), 1340 pos, (unsigned)count); 1341 inode_lock(inode); 1342 goto retry_snap; 1343 } 1344 if (written > 0) 1345 iov_iter_advance(from, written); 1346 ceph_put_snap_context(snapc); 1347 } else { 1348 loff_t old_size = i_size_read(inode); 1349 /* 1350 * No need to acquire the i_truncate_mutex. Because 1351 * the MDS revokes Fwb caps before sending truncate 1352 * message to us. We can't get Fwb cap while there 1353 * are pending vmtruncate. So write and vmtruncate 1354 * can not run at the same time 1355 */ 1356 written = generic_perform_write(file, from, pos); 1357 if (likely(written >= 0)) 1358 iocb->ki_pos = pos + written; 1359 if (i_size_read(inode) > old_size) 1360 ceph_fscache_update_objectsize(inode); 1361 inode_unlock(inode); 1362 } 1363 1364 if (written >= 0) { 1365 int dirty; 1366 spin_lock(&ci->i_ceph_lock); 1367 ci->i_inline_version = CEPH_INLINE_NONE; 1368 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1369 &prealloc_cf); 1370 spin_unlock(&ci->i_ceph_lock); 1371 if (dirty) 1372 __mark_inode_dirty(inode, dirty); 1373 } 1374 1375 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 1376 inode, ceph_vinop(inode), pos, (unsigned)count, 1377 ceph_cap_string(got)); 1378 ceph_put_cap_refs(ci, got); 1379 1380 if (written >= 0 && 1381 ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) || 1382 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { 1383 err = vfs_fsync_range(file, pos, pos + written - 1, 1); 1384 if (err < 0) 1385 written = err; 1386 } 1387 1388 goto out_unlocked; 1389 1390 out: 1391 inode_unlock(inode); 1392 out_unlocked: 1393 ceph_free_cap_flush(prealloc_cf); 1394 current->backing_dev_info = NULL; 1395 return written ? written : err; 1396 } 1397 1398 /* 1399 * llseek. be sure to verify file size on SEEK_END. 1400 */ 1401 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) 1402 { 1403 struct inode *inode = file->f_mapping->host; 1404 loff_t i_size; 1405 int ret; 1406 1407 inode_lock(inode); 1408 1409 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { 1410 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1411 if (ret < 0) { 1412 offset = ret; 1413 goto out; 1414 } 1415 } 1416 1417 i_size = i_size_read(inode); 1418 switch (whence) { 1419 case SEEK_END: 1420 offset += i_size; 1421 break; 1422 case SEEK_CUR: 1423 /* 1424 * Here we special-case the lseek(fd, 0, SEEK_CUR) 1425 * position-querying operation. Avoid rewriting the "same" 1426 * f_pos value back to the file because a concurrent read(), 1427 * write() or lseek() might have altered it 1428 */ 1429 if (offset == 0) { 1430 offset = file->f_pos; 1431 goto out; 1432 } 1433 offset += file->f_pos; 1434 break; 1435 case SEEK_DATA: 1436 if (offset >= i_size) { 1437 ret = -ENXIO; 1438 goto out; 1439 } 1440 break; 1441 case SEEK_HOLE: 1442 if (offset >= i_size) { 1443 ret = -ENXIO; 1444 goto out; 1445 } 1446 offset = i_size; 1447 break; 1448 } 1449 1450 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 1451 1452 out: 1453 inode_unlock(inode); 1454 return offset; 1455 } 1456 1457 static inline void ceph_zero_partial_page( 1458 struct inode *inode, loff_t offset, unsigned size) 1459 { 1460 struct page *page; 1461 pgoff_t index = offset >> PAGE_CACHE_SHIFT; 1462 1463 page = find_lock_page(inode->i_mapping, index); 1464 if (page) { 1465 wait_on_page_writeback(page); 1466 zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size); 1467 unlock_page(page); 1468 page_cache_release(page); 1469 } 1470 } 1471 1472 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 1473 loff_t length) 1474 { 1475 loff_t nearly = round_up(offset, PAGE_CACHE_SIZE); 1476 if (offset < nearly) { 1477 loff_t size = nearly - offset; 1478 if (length < size) 1479 size = length; 1480 ceph_zero_partial_page(inode, offset, size); 1481 offset += size; 1482 length -= size; 1483 } 1484 if (length >= PAGE_CACHE_SIZE) { 1485 loff_t size = round_down(length, PAGE_CACHE_SIZE); 1486 truncate_pagecache_range(inode, offset, offset + size - 1); 1487 offset += size; 1488 length -= size; 1489 } 1490 if (length) 1491 ceph_zero_partial_page(inode, offset, length); 1492 } 1493 1494 static int ceph_zero_partial_object(struct inode *inode, 1495 loff_t offset, loff_t *length) 1496 { 1497 struct ceph_inode_info *ci = ceph_inode(inode); 1498 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1499 struct ceph_osd_request *req; 1500 int ret = 0; 1501 loff_t zero = 0; 1502 int op; 1503 1504 if (!length) { 1505 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; 1506 length = &zero; 1507 } else { 1508 op = CEPH_OSD_OP_ZERO; 1509 } 1510 1511 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1512 ceph_vino(inode), 1513 offset, length, 1514 0, 1, op, 1515 CEPH_OSD_FLAG_WRITE | 1516 CEPH_OSD_FLAG_ONDISK, 1517 NULL, 0, 0, false); 1518 if (IS_ERR(req)) { 1519 ret = PTR_ERR(req); 1520 goto out; 1521 } 1522 1523 ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap, 1524 &inode->i_mtime); 1525 1526 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1527 if (!ret) { 1528 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1529 if (ret == -ENOENT) 1530 ret = 0; 1531 } 1532 ceph_osdc_put_request(req); 1533 1534 out: 1535 return ret; 1536 } 1537 1538 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) 1539 { 1540 int ret = 0; 1541 struct ceph_inode_info *ci = ceph_inode(inode); 1542 s32 stripe_unit = ceph_file_layout_su(ci->i_layout); 1543 s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout); 1544 s32 object_size = ceph_file_layout_object_size(ci->i_layout); 1545 u64 object_set_size = object_size * stripe_count; 1546 u64 nearly, t; 1547 1548 /* round offset up to next period boundary */ 1549 nearly = offset + object_set_size - 1; 1550 t = nearly; 1551 nearly -= do_div(t, object_set_size); 1552 1553 while (length && offset < nearly) { 1554 loff_t size = length; 1555 ret = ceph_zero_partial_object(inode, offset, &size); 1556 if (ret < 0) 1557 return ret; 1558 offset += size; 1559 length -= size; 1560 } 1561 while (length >= object_set_size) { 1562 int i; 1563 loff_t pos = offset; 1564 for (i = 0; i < stripe_count; ++i) { 1565 ret = ceph_zero_partial_object(inode, pos, NULL); 1566 if (ret < 0) 1567 return ret; 1568 pos += stripe_unit; 1569 } 1570 offset += object_set_size; 1571 length -= object_set_size; 1572 } 1573 while (length) { 1574 loff_t size = length; 1575 ret = ceph_zero_partial_object(inode, offset, &size); 1576 if (ret < 0) 1577 return ret; 1578 offset += size; 1579 length -= size; 1580 } 1581 return ret; 1582 } 1583 1584 static long ceph_fallocate(struct file *file, int mode, 1585 loff_t offset, loff_t length) 1586 { 1587 struct ceph_file_info *fi = file->private_data; 1588 struct inode *inode = file_inode(file); 1589 struct ceph_inode_info *ci = ceph_inode(inode); 1590 struct ceph_osd_client *osdc = 1591 &ceph_inode_to_client(inode)->client->osdc; 1592 struct ceph_cap_flush *prealloc_cf; 1593 int want, got = 0; 1594 int dirty; 1595 int ret = 0; 1596 loff_t endoff = 0; 1597 loff_t size; 1598 1599 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 1600 return -EOPNOTSUPP; 1601 1602 if (!S_ISREG(inode->i_mode)) 1603 return -EOPNOTSUPP; 1604 1605 prealloc_cf = ceph_alloc_cap_flush(); 1606 if (!prealloc_cf) 1607 return -ENOMEM; 1608 1609 inode_lock(inode); 1610 1611 if (ceph_snap(inode) != CEPH_NOSNAP) { 1612 ret = -EROFS; 1613 goto unlock; 1614 } 1615 1616 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) && 1617 !(mode & FALLOC_FL_PUNCH_HOLE)) { 1618 ret = -ENOSPC; 1619 goto unlock; 1620 } 1621 1622 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1623 ret = ceph_uninline_data(file, NULL); 1624 if (ret < 0) 1625 goto unlock; 1626 } 1627 1628 size = i_size_read(inode); 1629 if (!(mode & FALLOC_FL_KEEP_SIZE)) 1630 endoff = offset + length; 1631 1632 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1633 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1634 else 1635 want = CEPH_CAP_FILE_BUFFER; 1636 1637 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL); 1638 if (ret < 0) 1639 goto unlock; 1640 1641 if (mode & FALLOC_FL_PUNCH_HOLE) { 1642 if (offset < size) 1643 ceph_zero_pagecache_range(inode, offset, length); 1644 ret = ceph_zero_objects(inode, offset, length); 1645 } else if (endoff > size) { 1646 truncate_pagecache_range(inode, size, -1); 1647 if (ceph_inode_set_size(inode, endoff)) 1648 ceph_check_caps(ceph_inode(inode), 1649 CHECK_CAPS_AUTHONLY, NULL); 1650 } 1651 1652 if (!ret) { 1653 spin_lock(&ci->i_ceph_lock); 1654 ci->i_inline_version = CEPH_INLINE_NONE; 1655 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1656 &prealloc_cf); 1657 spin_unlock(&ci->i_ceph_lock); 1658 if (dirty) 1659 __mark_inode_dirty(inode, dirty); 1660 } 1661 1662 ceph_put_cap_refs(ci, got); 1663 unlock: 1664 inode_unlock(inode); 1665 ceph_free_cap_flush(prealloc_cf); 1666 return ret; 1667 } 1668 1669 const struct file_operations ceph_file_fops = { 1670 .open = ceph_open, 1671 .release = ceph_release, 1672 .llseek = ceph_llseek, 1673 .read_iter = ceph_read_iter, 1674 .write_iter = ceph_write_iter, 1675 .mmap = ceph_mmap, 1676 .fsync = ceph_fsync, 1677 .lock = ceph_lock, 1678 .flock = ceph_flock, 1679 .splice_read = generic_file_splice_read, 1680 .splice_write = iter_file_splice_write, 1681 .unlocked_ioctl = ceph_ioctl, 1682 .compat_ioctl = ceph_ioctl, 1683 .fallocate = ceph_fallocate, 1684 }; 1685 1686