1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/module.h> 5 #include <linux/sched.h> 6 #include <linux/slab.h> 7 #include <linux/file.h> 8 #include <linux/mount.h> 9 #include <linux/namei.h> 10 #include <linux/writeback.h> 11 #include <linux/falloc.h> 12 13 #include "super.h" 14 #include "mds_client.h" 15 #include "cache.h" 16 17 static __le32 ceph_flags_sys2wire(u32 flags) 18 { 19 u32 wire_flags = 0; 20 21 switch (flags & O_ACCMODE) { 22 case O_RDONLY: 23 wire_flags |= CEPH_O_RDONLY; 24 break; 25 case O_WRONLY: 26 wire_flags |= CEPH_O_WRONLY; 27 break; 28 case O_RDWR: 29 wire_flags |= CEPH_O_RDWR; 30 break; 31 } 32 33 flags &= ~O_ACCMODE; 34 35 #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; } 36 37 ceph_sys2wire(O_CREAT); 38 ceph_sys2wire(O_EXCL); 39 ceph_sys2wire(O_TRUNC); 40 ceph_sys2wire(O_DIRECTORY); 41 ceph_sys2wire(O_NOFOLLOW); 42 43 #undef ceph_sys2wire 44 45 if (flags) 46 dout("unused open flags: %x\n", flags); 47 48 return cpu_to_le32(wire_flags); 49 } 50 51 /* 52 * Ceph file operations 53 * 54 * Implement basic open/close functionality, and implement 55 * read/write. 56 * 57 * We implement three modes of file I/O: 58 * - buffered uses the generic_file_aio_{read,write} helpers 59 * 60 * - synchronous is used when there is multi-client read/write 61 * sharing, avoids the page cache, and synchronously waits for an 62 * ack from the OSD. 63 * 64 * - direct io takes the variant of the sync path that references 65 * user pages directly. 66 * 67 * fsync() flushes and waits on dirty pages, but just queues metadata 68 * for writeback: since the MDS can recover size and mtime there is no 69 * need to wait for MDS acknowledgement. 70 */ 71 72 /* 73 * How many pages to get in one call to iov_iter_get_pages(). This 74 * determines the size of the on-stack array used as a buffer. 75 */ 76 #define ITER_GET_BVECS_PAGES 64 77 78 static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize, 79 struct bio_vec *bvecs) 80 { 81 size_t size = 0; 82 int bvec_idx = 0; 83 84 if (maxsize > iov_iter_count(iter)) 85 maxsize = iov_iter_count(iter); 86 87 while (size < maxsize) { 88 struct page *pages[ITER_GET_BVECS_PAGES]; 89 ssize_t bytes; 90 size_t start; 91 int idx = 0; 92 93 bytes = iov_iter_get_pages(iter, pages, maxsize - size, 94 ITER_GET_BVECS_PAGES, &start); 95 if (bytes < 0) 96 return size ?: bytes; 97 98 iov_iter_advance(iter, bytes); 99 size += bytes; 100 101 for ( ; bytes; idx++, bvec_idx++) { 102 struct bio_vec bv = { 103 .bv_page = pages[idx], 104 .bv_len = min_t(int, bytes, PAGE_SIZE - start), 105 .bv_offset = start, 106 }; 107 108 bvecs[bvec_idx] = bv; 109 bytes -= bv.bv_len; 110 start = 0; 111 } 112 } 113 114 return size; 115 } 116 117 /* 118 * iov_iter_get_pages() only considers one iov_iter segment, no matter 119 * what maxsize or maxpages are given. For ITER_BVEC that is a single 120 * page. 121 * 122 * Attempt to get up to @maxsize bytes worth of pages from @iter. 123 * Return the number of bytes in the created bio_vec array, or an error. 124 */ 125 static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize, 126 struct bio_vec **bvecs, int *num_bvecs) 127 { 128 struct bio_vec *bv; 129 size_t orig_count = iov_iter_count(iter); 130 ssize_t bytes; 131 int npages; 132 133 iov_iter_truncate(iter, maxsize); 134 npages = iov_iter_npages(iter, INT_MAX); 135 iov_iter_reexpand(iter, orig_count); 136 137 /* 138 * __iter_get_bvecs() may populate only part of the array -- zero it 139 * out. 140 */ 141 bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO); 142 if (!bv) 143 return -ENOMEM; 144 145 bytes = __iter_get_bvecs(iter, maxsize, bv); 146 if (bytes < 0) { 147 /* 148 * No pages were pinned -- just free the array. 149 */ 150 kvfree(bv); 151 return bytes; 152 } 153 154 *bvecs = bv; 155 *num_bvecs = npages; 156 return bytes; 157 } 158 159 static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty) 160 { 161 int i; 162 163 for (i = 0; i < num_bvecs; i++) { 164 if (bvecs[i].bv_page) { 165 if (should_dirty) 166 set_page_dirty_lock(bvecs[i].bv_page); 167 put_page(bvecs[i].bv_page); 168 } 169 } 170 kvfree(bvecs); 171 } 172 173 /* 174 * Prepare an open request. Preallocate ceph_cap to avoid an 175 * inopportune ENOMEM later. 176 */ 177 static struct ceph_mds_request * 178 prepare_open_request(struct super_block *sb, int flags, int create_mode) 179 { 180 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 181 struct ceph_mds_client *mdsc = fsc->mdsc; 182 struct ceph_mds_request *req; 183 int want_auth = USE_ANY_MDS; 184 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 185 186 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) 187 want_auth = USE_AUTH_MDS; 188 189 req = ceph_mdsc_create_request(mdsc, op, want_auth); 190 if (IS_ERR(req)) 191 goto out; 192 req->r_fmode = ceph_flags_to_mode(flags); 193 req->r_args.open.flags = ceph_flags_sys2wire(flags); 194 req->r_args.open.mode = cpu_to_le32(create_mode); 195 out: 196 return req; 197 } 198 199 static int ceph_init_file_info(struct inode *inode, struct file *file, 200 int fmode, bool isdir) 201 { 202 struct ceph_file_info *fi; 203 204 dout("%s %p %p 0%o (%s)\n", __func__, inode, file, 205 inode->i_mode, isdir ? "dir" : "regular"); 206 BUG_ON(inode->i_fop->release != ceph_release); 207 208 if (isdir) { 209 struct ceph_dir_file_info *dfi = 210 kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL); 211 if (!dfi) { 212 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 213 return -ENOMEM; 214 } 215 216 file->private_data = dfi; 217 fi = &dfi->file_info; 218 dfi->next_offset = 2; 219 dfi->readdir_cache_idx = -1; 220 } else { 221 fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); 222 if (!fi) { 223 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 224 return -ENOMEM; 225 } 226 227 file->private_data = fi; 228 } 229 230 fi->fmode = fmode; 231 spin_lock_init(&fi->rw_contexts_lock); 232 INIT_LIST_HEAD(&fi->rw_contexts); 233 234 return 0; 235 } 236 237 /* 238 * initialize private struct file data. 239 * if we fail, clean up by dropping fmode reference on the ceph_inode 240 */ 241 static int ceph_init_file(struct inode *inode, struct file *file, int fmode) 242 { 243 int ret = 0; 244 245 switch (inode->i_mode & S_IFMT) { 246 case S_IFREG: 247 ceph_fscache_register_inode_cookie(inode); 248 ceph_fscache_file_set_cookie(inode, file); 249 case S_IFDIR: 250 ret = ceph_init_file_info(inode, file, fmode, 251 S_ISDIR(inode->i_mode)); 252 if (ret) 253 return ret; 254 break; 255 256 case S_IFLNK: 257 dout("init_file %p %p 0%o (symlink)\n", inode, file, 258 inode->i_mode); 259 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 260 break; 261 262 default: 263 dout("init_file %p %p 0%o (special)\n", inode, file, 264 inode->i_mode); 265 /* 266 * we need to drop the open ref now, since we don't 267 * have .release set to ceph_release. 268 */ 269 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 270 BUG_ON(inode->i_fop->release == ceph_release); 271 272 /* call the proper open fop */ 273 ret = inode->i_fop->open(inode, file); 274 } 275 return ret; 276 } 277 278 /* 279 * try renew caps after session gets killed. 280 */ 281 int ceph_renew_caps(struct inode *inode) 282 { 283 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 284 struct ceph_inode_info *ci = ceph_inode(inode); 285 struct ceph_mds_request *req; 286 int err, flags, wanted; 287 288 spin_lock(&ci->i_ceph_lock); 289 wanted = __ceph_caps_file_wanted(ci); 290 if (__ceph_is_any_real_caps(ci) && 291 (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) { 292 int issued = __ceph_caps_issued(ci, NULL); 293 spin_unlock(&ci->i_ceph_lock); 294 dout("renew caps %p want %s issued %s updating mds_wanted\n", 295 inode, ceph_cap_string(wanted), ceph_cap_string(issued)); 296 ceph_check_caps(ci, 0, NULL); 297 return 0; 298 } 299 spin_unlock(&ci->i_ceph_lock); 300 301 flags = 0; 302 if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR)) 303 flags = O_RDWR; 304 else if (wanted & CEPH_CAP_FILE_RD) 305 flags = O_RDONLY; 306 else if (wanted & CEPH_CAP_FILE_WR) 307 flags = O_WRONLY; 308 #ifdef O_LAZY 309 if (wanted & CEPH_CAP_FILE_LAZYIO) 310 flags |= O_LAZY; 311 #endif 312 313 req = prepare_open_request(inode->i_sb, flags, 0); 314 if (IS_ERR(req)) { 315 err = PTR_ERR(req); 316 goto out; 317 } 318 319 req->r_inode = inode; 320 ihold(inode); 321 req->r_num_caps = 1; 322 req->r_fmode = -1; 323 324 err = ceph_mdsc_do_request(mdsc, NULL, req); 325 ceph_mdsc_put_request(req); 326 out: 327 dout("renew caps %p open result=%d\n", inode, err); 328 return err < 0 ? err : 0; 329 } 330 331 /* 332 * If we already have the requisite capabilities, we can satisfy 333 * the open request locally (no need to request new caps from the 334 * MDS). We do, however, need to inform the MDS (asynchronously) 335 * if our wanted caps set expands. 336 */ 337 int ceph_open(struct inode *inode, struct file *file) 338 { 339 struct ceph_inode_info *ci = ceph_inode(inode); 340 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 341 struct ceph_mds_client *mdsc = fsc->mdsc; 342 struct ceph_mds_request *req; 343 struct ceph_file_info *fi = file->private_data; 344 int err; 345 int flags, fmode, wanted; 346 347 if (fi) { 348 dout("open file %p is already opened\n", file); 349 return 0; 350 } 351 352 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ 353 flags = file->f_flags & ~(O_CREAT|O_EXCL); 354 if (S_ISDIR(inode->i_mode)) 355 flags = O_DIRECTORY; /* mds likes to know */ 356 357 dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, 358 ceph_vinop(inode), file, flags, file->f_flags); 359 fmode = ceph_flags_to_mode(flags); 360 wanted = ceph_caps_for_mode(fmode); 361 362 /* snapped files are read-only */ 363 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) 364 return -EROFS; 365 366 /* trivially open snapdir */ 367 if (ceph_snap(inode) == CEPH_SNAPDIR) { 368 spin_lock(&ci->i_ceph_lock); 369 __ceph_get_fmode(ci, fmode); 370 spin_unlock(&ci->i_ceph_lock); 371 return ceph_init_file(inode, file, fmode); 372 } 373 374 /* 375 * No need to block if we have caps on the auth MDS (for 376 * write) or any MDS (for read). Update wanted set 377 * asynchronously. 378 */ 379 spin_lock(&ci->i_ceph_lock); 380 if (__ceph_is_any_real_caps(ci) && 381 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { 382 int mds_wanted = __ceph_caps_mds_wanted(ci, true); 383 int issued = __ceph_caps_issued(ci, NULL); 384 385 dout("open %p fmode %d want %s issued %s using existing\n", 386 inode, fmode, ceph_cap_string(wanted), 387 ceph_cap_string(issued)); 388 __ceph_get_fmode(ci, fmode); 389 spin_unlock(&ci->i_ceph_lock); 390 391 /* adjust wanted? */ 392 if ((issued & wanted) != wanted && 393 (mds_wanted & wanted) != wanted && 394 ceph_snap(inode) != CEPH_SNAPDIR) 395 ceph_check_caps(ci, 0, NULL); 396 397 return ceph_init_file(inode, file, fmode); 398 } else if (ceph_snap(inode) != CEPH_NOSNAP && 399 (ci->i_snap_caps & wanted) == wanted) { 400 __ceph_get_fmode(ci, fmode); 401 spin_unlock(&ci->i_ceph_lock); 402 return ceph_init_file(inode, file, fmode); 403 } 404 405 spin_unlock(&ci->i_ceph_lock); 406 407 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 408 req = prepare_open_request(inode->i_sb, flags, 0); 409 if (IS_ERR(req)) { 410 err = PTR_ERR(req); 411 goto out; 412 } 413 req->r_inode = inode; 414 ihold(inode); 415 416 req->r_num_caps = 1; 417 err = ceph_mdsc_do_request(mdsc, NULL, req); 418 if (!err) 419 err = ceph_init_file(inode, file, req->r_fmode); 420 ceph_mdsc_put_request(req); 421 dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 422 out: 423 return err; 424 } 425 426 427 /* 428 * Do a lookup + open with a single request. If we get a non-existent 429 * file or symlink, return 1 so the VFS can retry. 430 */ 431 int ceph_atomic_open(struct inode *dir, struct dentry *dentry, 432 struct file *file, unsigned flags, umode_t mode, 433 int *opened) 434 { 435 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 436 struct ceph_mds_client *mdsc = fsc->mdsc; 437 struct ceph_mds_request *req; 438 struct dentry *dn; 439 struct ceph_acls_info acls = {}; 440 int mask; 441 int err; 442 443 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", 444 dir, dentry, dentry, 445 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); 446 447 if (dentry->d_name.len > NAME_MAX) 448 return -ENAMETOOLONG; 449 450 if (flags & O_CREAT) { 451 if (ceph_quota_is_max_files_exceeded(dir)) 452 return -EDQUOT; 453 err = ceph_pre_init_acls(dir, &mode, &acls); 454 if (err < 0) 455 return err; 456 } 457 458 /* do the open */ 459 req = prepare_open_request(dir->i_sb, flags, mode); 460 if (IS_ERR(req)) { 461 err = PTR_ERR(req); 462 goto out_acl; 463 } 464 req->r_dentry = dget(dentry); 465 req->r_num_caps = 2; 466 if (flags & O_CREAT) { 467 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL; 468 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 469 if (acls.pagelist) { 470 req->r_pagelist = acls.pagelist; 471 acls.pagelist = NULL; 472 } 473 } 474 475 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 476 if (ceph_security_xattr_wanted(dir)) 477 mask |= CEPH_CAP_XATTR_SHARED; 478 req->r_args.open.mask = cpu_to_le32(mask); 479 480 req->r_parent = dir; 481 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); 482 err = ceph_mdsc_do_request(mdsc, 483 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, 484 req); 485 err = ceph_handle_snapdir(req, dentry, err); 486 if (err) 487 goto out_req; 488 489 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) 490 err = ceph_handle_notrace_create(dir, dentry); 491 492 if (d_in_lookup(dentry)) { 493 dn = ceph_finish_lookup(req, dentry, err); 494 if (IS_ERR(dn)) 495 err = PTR_ERR(dn); 496 } else { 497 /* we were given a hashed negative dentry */ 498 dn = NULL; 499 } 500 if (err) 501 goto out_req; 502 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { 503 /* make vfs retry on splice, ENOENT, or symlink */ 504 dout("atomic_open finish_no_open on dn %p\n", dn); 505 err = finish_no_open(file, dn); 506 } else { 507 dout("atomic_open finish_open on dn %p\n", dn); 508 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 509 ceph_init_inode_acls(d_inode(dentry), &acls); 510 *opened |= FILE_CREATED; 511 } 512 err = finish_open(file, dentry, ceph_open, opened); 513 } 514 out_req: 515 if (!req->r_err && req->r_target_inode) 516 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); 517 ceph_mdsc_put_request(req); 518 out_acl: 519 ceph_release_acls_info(&acls); 520 dout("atomic_open result=%d\n", err); 521 return err; 522 } 523 524 int ceph_release(struct inode *inode, struct file *file) 525 { 526 struct ceph_inode_info *ci = ceph_inode(inode); 527 528 if (S_ISDIR(inode->i_mode)) { 529 struct ceph_dir_file_info *dfi = file->private_data; 530 dout("release inode %p dir file %p\n", inode, file); 531 WARN_ON(!list_empty(&dfi->file_info.rw_contexts)); 532 533 ceph_put_fmode(ci, dfi->file_info.fmode); 534 535 if (dfi->last_readdir) 536 ceph_mdsc_put_request(dfi->last_readdir); 537 kfree(dfi->last_name); 538 kfree(dfi->dir_info); 539 kmem_cache_free(ceph_dir_file_cachep, dfi); 540 } else { 541 struct ceph_file_info *fi = file->private_data; 542 dout("release inode %p regular file %p\n", inode, file); 543 WARN_ON(!list_empty(&fi->rw_contexts)); 544 545 ceph_put_fmode(ci, fi->fmode); 546 kmem_cache_free(ceph_file_cachep, fi); 547 } 548 549 /* wake up anyone waiting for caps on this inode */ 550 wake_up_all(&ci->i_cap_wq); 551 return 0; 552 } 553 554 enum { 555 HAVE_RETRIED = 1, 556 CHECK_EOF = 2, 557 READ_INLINE = 3, 558 }; 559 560 /* 561 * Read a range of bytes striped over one or more objects. Iterate over 562 * objects we stripe over. (That's not atomic, but good enough for now.) 563 * 564 * If we get a short result from the OSD, check against i_size; we need to 565 * only return a short read to the caller if we hit EOF. 566 */ 567 static int striped_read(struct inode *inode, 568 u64 pos, u64 len, 569 struct page **pages, int num_pages, 570 int page_align, int *checkeof) 571 { 572 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 573 struct ceph_inode_info *ci = ceph_inode(inode); 574 u64 this_len; 575 loff_t i_size; 576 int page_idx; 577 int ret, read = 0; 578 bool hit_stripe, was_short; 579 580 /* 581 * we may need to do multiple reads. not atomic, unfortunately. 582 */ 583 more: 584 this_len = len; 585 page_idx = (page_align + read) >> PAGE_SHIFT; 586 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), 587 &ci->i_layout, pos, &this_len, 588 ci->i_truncate_seq, ci->i_truncate_size, 589 pages + page_idx, num_pages - page_idx, 590 ((page_align + read) & ~PAGE_MASK)); 591 if (ret == -ENOENT) 592 ret = 0; 593 hit_stripe = this_len < len; 594 was_short = ret >= 0 && ret < this_len; 595 dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read, 596 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); 597 598 i_size = i_size_read(inode); 599 if (ret >= 0) { 600 if (was_short && (pos + ret < i_size)) { 601 int zlen = min(this_len - ret, i_size - pos - ret); 602 int zoff = page_align + read + ret; 603 dout(" zero gap %llu to %llu\n", 604 pos + ret, pos + ret + zlen); 605 ceph_zero_page_vector_range(zoff, zlen, pages); 606 ret += zlen; 607 } 608 609 read += ret; 610 pos += ret; 611 len -= ret; 612 613 /* hit stripe and need continue*/ 614 if (len && hit_stripe && pos < i_size) 615 goto more; 616 } 617 618 if (read > 0) { 619 ret = read; 620 /* did we bounce off eof? */ 621 if (pos + len > i_size) 622 *checkeof = CHECK_EOF; 623 } 624 625 dout("striped_read returns %d\n", ret); 626 return ret; 627 } 628 629 /* 630 * Completely synchronous read and write methods. Direct from __user 631 * buffer to osd, or directly to user pages (if O_DIRECT). 632 * 633 * If the read spans object boundary, just do multiple reads. 634 */ 635 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to, 636 int *checkeof) 637 { 638 struct file *file = iocb->ki_filp; 639 struct inode *inode = file_inode(file); 640 struct page **pages; 641 u64 off = iocb->ki_pos; 642 int num_pages; 643 ssize_t ret; 644 size_t len = iov_iter_count(to); 645 646 dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len, 647 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 648 649 if (!len) 650 return 0; 651 /* 652 * flush any page cache pages in this range. this 653 * will make concurrent normal and sync io slow, 654 * but it will at least behave sensibly when they are 655 * in sequence. 656 */ 657 ret = filemap_write_and_wait_range(inode->i_mapping, off, 658 off + len); 659 if (ret < 0) 660 return ret; 661 662 if (unlikely(to->type & ITER_PIPE)) { 663 size_t page_off; 664 ret = iov_iter_get_pages_alloc(to, &pages, len, 665 &page_off); 666 if (ret <= 0) 667 return -ENOMEM; 668 num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE); 669 670 ret = striped_read(inode, off, ret, pages, num_pages, 671 page_off, checkeof); 672 if (ret > 0) { 673 iov_iter_advance(to, ret); 674 off += ret; 675 } else { 676 iov_iter_advance(to, 0); 677 } 678 ceph_put_page_vector(pages, num_pages, false); 679 } else { 680 num_pages = calc_pages_for(off, len); 681 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 682 if (IS_ERR(pages)) 683 return PTR_ERR(pages); 684 685 ret = striped_read(inode, off, len, pages, num_pages, 686 (off & ~PAGE_MASK), checkeof); 687 if (ret > 0) { 688 int l, k = 0; 689 size_t left = ret; 690 691 while (left) { 692 size_t page_off = off & ~PAGE_MASK; 693 size_t copy = min_t(size_t, left, 694 PAGE_SIZE - page_off); 695 l = copy_page_to_iter(pages[k++], page_off, 696 copy, to); 697 off += l; 698 left -= l; 699 if (l < copy) 700 break; 701 } 702 } 703 ceph_release_page_vector(pages, num_pages); 704 } 705 706 if (off > iocb->ki_pos) { 707 ret = off - iocb->ki_pos; 708 iocb->ki_pos = off; 709 } 710 711 dout("sync_read result %zd\n", ret); 712 return ret; 713 } 714 715 struct ceph_aio_request { 716 struct kiocb *iocb; 717 size_t total_len; 718 bool write; 719 bool should_dirty; 720 int error; 721 struct list_head osd_reqs; 722 unsigned num_reqs; 723 atomic_t pending_reqs; 724 struct timespec mtime; 725 struct ceph_cap_flush *prealloc_cf; 726 }; 727 728 struct ceph_aio_work { 729 struct work_struct work; 730 struct ceph_osd_request *req; 731 }; 732 733 static void ceph_aio_retry_work(struct work_struct *work); 734 735 static void ceph_aio_complete(struct inode *inode, 736 struct ceph_aio_request *aio_req) 737 { 738 struct ceph_inode_info *ci = ceph_inode(inode); 739 int ret; 740 741 if (!atomic_dec_and_test(&aio_req->pending_reqs)) 742 return; 743 744 ret = aio_req->error; 745 if (!ret) 746 ret = aio_req->total_len; 747 748 dout("ceph_aio_complete %p rc %d\n", inode, ret); 749 750 if (ret >= 0 && aio_req->write) { 751 int dirty; 752 753 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len; 754 if (endoff > i_size_read(inode)) { 755 if (ceph_inode_set_size(inode, endoff)) 756 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 757 } 758 759 spin_lock(&ci->i_ceph_lock); 760 ci->i_inline_version = CEPH_INLINE_NONE; 761 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 762 &aio_req->prealloc_cf); 763 spin_unlock(&ci->i_ceph_lock); 764 if (dirty) 765 __mark_inode_dirty(inode, dirty); 766 767 } 768 769 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : 770 CEPH_CAP_FILE_RD)); 771 772 aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); 773 774 ceph_free_cap_flush(aio_req->prealloc_cf); 775 kfree(aio_req); 776 } 777 778 static void ceph_aio_complete_req(struct ceph_osd_request *req) 779 { 780 int rc = req->r_result; 781 struct inode *inode = req->r_inode; 782 struct ceph_aio_request *aio_req = req->r_priv; 783 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 784 785 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS); 786 BUG_ON(!osd_data->num_bvecs); 787 788 dout("ceph_aio_complete_req %p rc %d bytes %u\n", 789 inode, rc, osd_data->bvec_pos.iter.bi_size); 790 791 if (rc == -EOLDSNAPC) { 792 struct ceph_aio_work *aio_work; 793 BUG_ON(!aio_req->write); 794 795 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); 796 if (aio_work) { 797 INIT_WORK(&aio_work->work, ceph_aio_retry_work); 798 aio_work->req = req; 799 queue_work(ceph_inode_to_client(inode)->wb_wq, 800 &aio_work->work); 801 return; 802 } 803 rc = -ENOMEM; 804 } else if (!aio_req->write) { 805 if (rc == -ENOENT) 806 rc = 0; 807 if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) { 808 struct iov_iter i; 809 int zlen = osd_data->bvec_pos.iter.bi_size - rc; 810 811 /* 812 * If read is satisfied by single OSD request, 813 * it can pass EOF. Otherwise read is within 814 * i_size. 815 */ 816 if (aio_req->num_reqs == 1) { 817 loff_t i_size = i_size_read(inode); 818 loff_t endoff = aio_req->iocb->ki_pos + rc; 819 if (endoff < i_size) 820 zlen = min_t(size_t, zlen, 821 i_size - endoff); 822 aio_req->total_len = rc + zlen; 823 } 824 825 iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs, 826 osd_data->num_bvecs, 827 osd_data->bvec_pos.iter.bi_size); 828 iov_iter_advance(&i, rc); 829 iov_iter_zero(zlen, &i); 830 } 831 } 832 833 put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs, 834 aio_req->should_dirty); 835 ceph_osdc_put_request(req); 836 837 if (rc < 0) 838 cmpxchg(&aio_req->error, 0, rc); 839 840 ceph_aio_complete(inode, aio_req); 841 return; 842 } 843 844 static void ceph_aio_retry_work(struct work_struct *work) 845 { 846 struct ceph_aio_work *aio_work = 847 container_of(work, struct ceph_aio_work, work); 848 struct ceph_osd_request *orig_req = aio_work->req; 849 struct ceph_aio_request *aio_req = orig_req->r_priv; 850 struct inode *inode = orig_req->r_inode; 851 struct ceph_inode_info *ci = ceph_inode(inode); 852 struct ceph_snap_context *snapc; 853 struct ceph_osd_request *req; 854 int ret; 855 856 spin_lock(&ci->i_ceph_lock); 857 if (__ceph_have_pending_cap_snap(ci)) { 858 struct ceph_cap_snap *capsnap = 859 list_last_entry(&ci->i_cap_snaps, 860 struct ceph_cap_snap, 861 ci_item); 862 snapc = ceph_get_snap_context(capsnap->context); 863 } else { 864 BUG_ON(!ci->i_head_snapc); 865 snapc = ceph_get_snap_context(ci->i_head_snapc); 866 } 867 spin_unlock(&ci->i_ceph_lock); 868 869 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, 870 false, GFP_NOFS); 871 if (!req) { 872 ret = -ENOMEM; 873 req = orig_req; 874 goto out; 875 } 876 877 req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 878 ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc); 879 ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid); 880 881 ret = ceph_osdc_alloc_messages(req, GFP_NOFS); 882 if (ret) { 883 ceph_osdc_put_request(req); 884 req = orig_req; 885 goto out; 886 } 887 888 req->r_ops[0] = orig_req->r_ops[0]; 889 890 req->r_mtime = aio_req->mtime; 891 req->r_data_offset = req->r_ops[0].extent.offset; 892 893 ceph_osdc_put_request(orig_req); 894 895 req->r_callback = ceph_aio_complete_req; 896 req->r_inode = inode; 897 req->r_priv = aio_req; 898 899 ret = ceph_osdc_start_request(req->r_osdc, req, false); 900 out: 901 if (ret < 0) { 902 req->r_result = ret; 903 ceph_aio_complete_req(req); 904 } 905 906 ceph_put_snap_context(snapc); 907 kfree(aio_work); 908 } 909 910 static ssize_t 911 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, 912 struct ceph_snap_context *snapc, 913 struct ceph_cap_flush **pcf) 914 { 915 struct file *file = iocb->ki_filp; 916 struct inode *inode = file_inode(file); 917 struct ceph_inode_info *ci = ceph_inode(inode); 918 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 919 struct ceph_vino vino; 920 struct ceph_osd_request *req; 921 struct bio_vec *bvecs; 922 struct ceph_aio_request *aio_req = NULL; 923 int num_pages = 0; 924 int flags; 925 int ret; 926 struct timespec mtime = timespec64_to_timespec(current_time(inode)); 927 size_t count = iov_iter_count(iter); 928 loff_t pos = iocb->ki_pos; 929 bool write = iov_iter_rw(iter) == WRITE; 930 bool should_dirty = !write && iter_is_iovec(iter); 931 932 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 933 return -EROFS; 934 935 dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n", 936 (write ? "write" : "read"), file, pos, (unsigned)count, 937 snapc, snapc->seq); 938 939 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 940 if (ret < 0) 941 return ret; 942 943 if (write) { 944 int ret2 = invalidate_inode_pages2_range(inode->i_mapping, 945 pos >> PAGE_SHIFT, 946 (pos + count) >> PAGE_SHIFT); 947 if (ret2 < 0) 948 dout("invalidate_inode_pages2_range returned %d\n", ret2); 949 950 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 951 } else { 952 flags = CEPH_OSD_FLAG_READ; 953 } 954 955 while (iov_iter_count(iter) > 0) { 956 u64 size = iov_iter_count(iter); 957 ssize_t len; 958 959 if (write) 960 size = min_t(u64, size, fsc->mount_options->wsize); 961 else 962 size = min_t(u64, size, fsc->mount_options->rsize); 963 964 vino = ceph_vino(inode); 965 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 966 vino, pos, &size, 0, 967 1, 968 write ? CEPH_OSD_OP_WRITE : 969 CEPH_OSD_OP_READ, 970 flags, snapc, 971 ci->i_truncate_seq, 972 ci->i_truncate_size, 973 false); 974 if (IS_ERR(req)) { 975 ret = PTR_ERR(req); 976 break; 977 } 978 979 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages); 980 if (len < 0) { 981 ceph_osdc_put_request(req); 982 ret = len; 983 break; 984 } 985 if (len != size) 986 osd_req_op_extent_update(req, 0, len); 987 988 /* 989 * To simplify error handling, allow AIO when IO within i_size 990 * or IO can be satisfied by single OSD request. 991 */ 992 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && 993 (len == count || pos + count <= i_size_read(inode))) { 994 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); 995 if (aio_req) { 996 aio_req->iocb = iocb; 997 aio_req->write = write; 998 aio_req->should_dirty = should_dirty; 999 INIT_LIST_HEAD(&aio_req->osd_reqs); 1000 if (write) { 1001 aio_req->mtime = mtime; 1002 swap(aio_req->prealloc_cf, *pcf); 1003 } 1004 } 1005 /* ignore error */ 1006 } 1007 1008 if (write) { 1009 /* 1010 * throw out any page cache pages in this range. this 1011 * may block. 1012 */ 1013 truncate_inode_pages_range(inode->i_mapping, pos, 1014 (pos+len) | (PAGE_SIZE - 1)); 1015 1016 req->r_mtime = mtime; 1017 } 1018 1019 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len); 1020 1021 if (aio_req) { 1022 aio_req->total_len += len; 1023 aio_req->num_reqs++; 1024 atomic_inc(&aio_req->pending_reqs); 1025 1026 req->r_callback = ceph_aio_complete_req; 1027 req->r_inode = inode; 1028 req->r_priv = aio_req; 1029 list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); 1030 1031 pos += len; 1032 continue; 1033 } 1034 1035 ret = ceph_osdc_start_request(req->r_osdc, req, false); 1036 if (!ret) 1037 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1038 1039 size = i_size_read(inode); 1040 if (!write) { 1041 if (ret == -ENOENT) 1042 ret = 0; 1043 if (ret >= 0 && ret < len && pos + ret < size) { 1044 struct iov_iter i; 1045 int zlen = min_t(size_t, len - ret, 1046 size - pos - ret); 1047 1048 iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages, 1049 len); 1050 iov_iter_advance(&i, ret); 1051 iov_iter_zero(zlen, &i); 1052 ret += zlen; 1053 } 1054 if (ret >= 0) 1055 len = ret; 1056 } 1057 1058 put_bvecs(bvecs, num_pages, should_dirty); 1059 ceph_osdc_put_request(req); 1060 if (ret < 0) 1061 break; 1062 1063 pos += len; 1064 if (!write && pos >= size) 1065 break; 1066 1067 if (write && pos > size) { 1068 if (ceph_inode_set_size(inode, pos)) 1069 ceph_check_caps(ceph_inode(inode), 1070 CHECK_CAPS_AUTHONLY, 1071 NULL); 1072 } 1073 } 1074 1075 if (aio_req) { 1076 LIST_HEAD(osd_reqs); 1077 1078 if (aio_req->num_reqs == 0) { 1079 kfree(aio_req); 1080 return ret; 1081 } 1082 1083 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : 1084 CEPH_CAP_FILE_RD); 1085 1086 list_splice(&aio_req->osd_reqs, &osd_reqs); 1087 while (!list_empty(&osd_reqs)) { 1088 req = list_first_entry(&osd_reqs, 1089 struct ceph_osd_request, 1090 r_unsafe_item); 1091 list_del_init(&req->r_unsafe_item); 1092 if (ret >= 0) 1093 ret = ceph_osdc_start_request(req->r_osdc, 1094 req, false); 1095 if (ret < 0) { 1096 req->r_result = ret; 1097 ceph_aio_complete_req(req); 1098 } 1099 } 1100 return -EIOCBQUEUED; 1101 } 1102 1103 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { 1104 ret = pos - iocb->ki_pos; 1105 iocb->ki_pos = pos; 1106 } 1107 return ret; 1108 } 1109 1110 /* 1111 * Synchronous write, straight from __user pointer or user pages. 1112 * 1113 * If write spans object boundary, just do multiple writes. (For a 1114 * correct atomic write, we should e.g. take write locks on all 1115 * objects, rollback on failure, etc.) 1116 */ 1117 static ssize_t 1118 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, 1119 struct ceph_snap_context *snapc) 1120 { 1121 struct file *file = iocb->ki_filp; 1122 struct inode *inode = file_inode(file); 1123 struct ceph_inode_info *ci = ceph_inode(inode); 1124 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1125 struct ceph_vino vino; 1126 struct ceph_osd_request *req; 1127 struct page **pages; 1128 u64 len; 1129 int num_pages; 1130 int written = 0; 1131 int flags; 1132 int ret; 1133 bool check_caps = false; 1134 struct timespec mtime = timespec64_to_timespec(current_time(inode)); 1135 size_t count = iov_iter_count(from); 1136 1137 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1138 return -EROFS; 1139 1140 dout("sync_write on file %p %lld~%u snapc %p seq %lld\n", 1141 file, pos, (unsigned)count, snapc, snapc->seq); 1142 1143 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 1144 if (ret < 0) 1145 return ret; 1146 1147 ret = invalidate_inode_pages2_range(inode->i_mapping, 1148 pos >> PAGE_SHIFT, 1149 (pos + count) >> PAGE_SHIFT); 1150 if (ret < 0) 1151 dout("invalidate_inode_pages2_range returned %d\n", ret); 1152 1153 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 1154 1155 while ((len = iov_iter_count(from)) > 0) { 1156 size_t left; 1157 int n; 1158 1159 vino = ceph_vino(inode); 1160 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1161 vino, pos, &len, 0, 1, 1162 CEPH_OSD_OP_WRITE, flags, snapc, 1163 ci->i_truncate_seq, 1164 ci->i_truncate_size, 1165 false); 1166 if (IS_ERR(req)) { 1167 ret = PTR_ERR(req); 1168 break; 1169 } 1170 1171 /* 1172 * write from beginning of first page, 1173 * regardless of io alignment 1174 */ 1175 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1176 1177 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1178 if (IS_ERR(pages)) { 1179 ret = PTR_ERR(pages); 1180 goto out; 1181 } 1182 1183 left = len; 1184 for (n = 0; n < num_pages; n++) { 1185 size_t plen = min_t(size_t, left, PAGE_SIZE); 1186 ret = copy_page_from_iter(pages[n], 0, plen, from); 1187 if (ret != plen) { 1188 ret = -EFAULT; 1189 break; 1190 } 1191 left -= ret; 1192 } 1193 1194 if (ret < 0) { 1195 ceph_release_page_vector(pages, num_pages); 1196 goto out; 1197 } 1198 1199 req->r_inode = inode; 1200 1201 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, 1202 false, true); 1203 1204 req->r_mtime = mtime; 1205 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1206 if (!ret) 1207 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1208 1209 out: 1210 ceph_osdc_put_request(req); 1211 if (ret != 0) { 1212 ceph_set_error_write(ci); 1213 break; 1214 } 1215 1216 ceph_clear_error_write(ci); 1217 pos += len; 1218 written += len; 1219 if (pos > i_size_read(inode)) { 1220 check_caps = ceph_inode_set_size(inode, pos); 1221 if (check_caps) 1222 ceph_check_caps(ceph_inode(inode), 1223 CHECK_CAPS_AUTHONLY, 1224 NULL); 1225 } 1226 1227 } 1228 1229 if (ret != -EOLDSNAPC && written > 0) { 1230 ret = written; 1231 iocb->ki_pos = pos; 1232 } 1233 return ret; 1234 } 1235 1236 /* 1237 * Wrap generic_file_aio_read with checks for cap bits on the inode. 1238 * Atomically grab references, so that those bits are not released 1239 * back to the MDS mid-read. 1240 * 1241 * Hmm, the sync read case isn't actually async... should it be? 1242 */ 1243 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) 1244 { 1245 struct file *filp = iocb->ki_filp; 1246 struct ceph_file_info *fi = filp->private_data; 1247 size_t len = iov_iter_count(to); 1248 struct inode *inode = file_inode(filp); 1249 struct ceph_inode_info *ci = ceph_inode(inode); 1250 struct page *pinned_page = NULL; 1251 ssize_t ret; 1252 int want, got = 0; 1253 int retry_op = 0, read = 0; 1254 1255 again: 1256 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 1257 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); 1258 1259 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1260 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1261 else 1262 want = CEPH_CAP_FILE_CACHE; 1263 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); 1264 if (ret < 0) 1265 return ret; 1266 1267 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || 1268 (iocb->ki_flags & IOCB_DIRECT) || 1269 (fi->flags & CEPH_F_SYNC)) { 1270 1271 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1272 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1273 ceph_cap_string(got)); 1274 1275 if (ci->i_inline_version == CEPH_INLINE_NONE) { 1276 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { 1277 ret = ceph_direct_read_write(iocb, to, 1278 NULL, NULL); 1279 if (ret >= 0 && ret < len) 1280 retry_op = CHECK_EOF; 1281 } else { 1282 ret = ceph_sync_read(iocb, to, &retry_op); 1283 } 1284 } else { 1285 retry_op = READ_INLINE; 1286 } 1287 } else { 1288 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got); 1289 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1290 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1291 ceph_cap_string(got)); 1292 ceph_add_rw_context(fi, &rw_ctx); 1293 ret = generic_file_read_iter(iocb, to); 1294 ceph_del_rw_context(fi, &rw_ctx); 1295 } 1296 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1297 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1298 if (pinned_page) { 1299 put_page(pinned_page); 1300 pinned_page = NULL; 1301 } 1302 ceph_put_cap_refs(ci, got); 1303 if (retry_op > HAVE_RETRIED && ret >= 0) { 1304 int statret; 1305 struct page *page = NULL; 1306 loff_t i_size; 1307 if (retry_op == READ_INLINE) { 1308 page = __page_cache_alloc(GFP_KERNEL); 1309 if (!page) 1310 return -ENOMEM; 1311 } 1312 1313 statret = __ceph_do_getattr(inode, page, 1314 CEPH_STAT_CAP_INLINE_DATA, !!page); 1315 if (statret < 0) { 1316 if (page) 1317 __free_page(page); 1318 if (statret == -ENODATA) { 1319 BUG_ON(retry_op != READ_INLINE); 1320 goto again; 1321 } 1322 return statret; 1323 } 1324 1325 i_size = i_size_read(inode); 1326 if (retry_op == READ_INLINE) { 1327 BUG_ON(ret > 0 || read > 0); 1328 if (iocb->ki_pos < i_size && 1329 iocb->ki_pos < PAGE_SIZE) { 1330 loff_t end = min_t(loff_t, i_size, 1331 iocb->ki_pos + len); 1332 end = min_t(loff_t, end, PAGE_SIZE); 1333 if (statret < end) 1334 zero_user_segment(page, statret, end); 1335 ret = copy_page_to_iter(page, 1336 iocb->ki_pos & ~PAGE_MASK, 1337 end - iocb->ki_pos, to); 1338 iocb->ki_pos += ret; 1339 read += ret; 1340 } 1341 if (iocb->ki_pos < i_size && read < len) { 1342 size_t zlen = min_t(size_t, len - read, 1343 i_size - iocb->ki_pos); 1344 ret = iov_iter_zero(zlen, to); 1345 iocb->ki_pos += ret; 1346 read += ret; 1347 } 1348 __free_pages(page, 0); 1349 return read; 1350 } 1351 1352 /* hit EOF or hole? */ 1353 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && 1354 ret < len) { 1355 dout("sync_read hit hole, ppos %lld < size %lld" 1356 ", reading more\n", iocb->ki_pos, i_size); 1357 1358 read += ret; 1359 len -= ret; 1360 retry_op = HAVE_RETRIED; 1361 goto again; 1362 } 1363 } 1364 1365 if (ret >= 0) 1366 ret += read; 1367 1368 return ret; 1369 } 1370 1371 /* 1372 * Take cap references to avoid releasing caps to MDS mid-write. 1373 * 1374 * If we are synchronous, and write with an old snap context, the OSD 1375 * may return EOLDSNAPC. In that case, retry the write.. _after_ 1376 * dropping our cap refs and allowing the pending snap to logically 1377 * complete _before_ this write occurs. 1378 * 1379 * If we are near ENOSPC, write synchronously. 1380 */ 1381 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) 1382 { 1383 struct file *file = iocb->ki_filp; 1384 struct ceph_file_info *fi = file->private_data; 1385 struct inode *inode = file_inode(file); 1386 struct ceph_inode_info *ci = ceph_inode(inode); 1387 struct ceph_osd_client *osdc = 1388 &ceph_sb_to_client(inode->i_sb)->client->osdc; 1389 struct ceph_cap_flush *prealloc_cf; 1390 ssize_t count, written = 0; 1391 int err, want, got; 1392 loff_t pos; 1393 1394 if (ceph_snap(inode) != CEPH_NOSNAP) 1395 return -EROFS; 1396 1397 prealloc_cf = ceph_alloc_cap_flush(); 1398 if (!prealloc_cf) 1399 return -ENOMEM; 1400 1401 retry_snap: 1402 inode_lock(inode); 1403 1404 /* We can write back this queue in page reclaim */ 1405 current->backing_dev_info = inode_to_bdi(inode); 1406 1407 if (iocb->ki_flags & IOCB_APPEND) { 1408 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1409 if (err < 0) 1410 goto out; 1411 } 1412 1413 err = generic_write_checks(iocb, from); 1414 if (err <= 0) 1415 goto out; 1416 1417 pos = iocb->ki_pos; 1418 count = iov_iter_count(from); 1419 if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) { 1420 err = -EDQUOT; 1421 goto out; 1422 } 1423 1424 err = file_remove_privs(file); 1425 if (err) 1426 goto out; 1427 1428 err = file_update_time(file); 1429 if (err) 1430 goto out; 1431 1432 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1433 err = ceph_uninline_data(file, NULL); 1434 if (err < 0) 1435 goto out; 1436 } 1437 1438 /* FIXME: not complete since it doesn't account for being at quota */ 1439 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) { 1440 err = -ENOSPC; 1441 goto out; 1442 } 1443 1444 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 1445 inode, ceph_vinop(inode), pos, count, i_size_read(inode)); 1446 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1447 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1448 else 1449 want = CEPH_CAP_FILE_BUFFER; 1450 got = 0; 1451 err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count, 1452 &got, NULL); 1453 if (err < 0) 1454 goto out; 1455 1456 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 1457 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 1458 1459 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 1460 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) || 1461 (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) { 1462 struct ceph_snap_context *snapc; 1463 struct iov_iter data; 1464 inode_unlock(inode); 1465 1466 spin_lock(&ci->i_ceph_lock); 1467 if (__ceph_have_pending_cap_snap(ci)) { 1468 struct ceph_cap_snap *capsnap = 1469 list_last_entry(&ci->i_cap_snaps, 1470 struct ceph_cap_snap, 1471 ci_item); 1472 snapc = ceph_get_snap_context(capsnap->context); 1473 } else { 1474 BUG_ON(!ci->i_head_snapc); 1475 snapc = ceph_get_snap_context(ci->i_head_snapc); 1476 } 1477 spin_unlock(&ci->i_ceph_lock); 1478 1479 /* we might need to revert back to that point */ 1480 data = *from; 1481 if (iocb->ki_flags & IOCB_DIRECT) 1482 written = ceph_direct_read_write(iocb, &data, snapc, 1483 &prealloc_cf); 1484 else 1485 written = ceph_sync_write(iocb, &data, pos, snapc); 1486 if (written > 0) 1487 iov_iter_advance(from, written); 1488 ceph_put_snap_context(snapc); 1489 } else { 1490 /* 1491 * No need to acquire the i_truncate_mutex. Because 1492 * the MDS revokes Fwb caps before sending truncate 1493 * message to us. We can't get Fwb cap while there 1494 * are pending vmtruncate. So write and vmtruncate 1495 * can not run at the same time 1496 */ 1497 written = generic_perform_write(file, from, pos); 1498 if (likely(written >= 0)) 1499 iocb->ki_pos = pos + written; 1500 inode_unlock(inode); 1501 } 1502 1503 if (written >= 0) { 1504 int dirty; 1505 1506 spin_lock(&ci->i_ceph_lock); 1507 ci->i_inline_version = CEPH_INLINE_NONE; 1508 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1509 &prealloc_cf); 1510 spin_unlock(&ci->i_ceph_lock); 1511 if (dirty) 1512 __mark_inode_dirty(inode, dirty); 1513 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos)) 1514 ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL); 1515 } 1516 1517 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 1518 inode, ceph_vinop(inode), pos, (unsigned)count, 1519 ceph_cap_string(got)); 1520 ceph_put_cap_refs(ci, got); 1521 1522 if (written == -EOLDSNAPC) { 1523 dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n", 1524 inode, ceph_vinop(inode), pos, (unsigned)count); 1525 goto retry_snap; 1526 } 1527 1528 if (written >= 0) { 1529 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL)) 1530 iocb->ki_flags |= IOCB_DSYNC; 1531 written = generic_write_sync(iocb, written); 1532 } 1533 1534 goto out_unlocked; 1535 1536 out: 1537 inode_unlock(inode); 1538 out_unlocked: 1539 ceph_free_cap_flush(prealloc_cf); 1540 current->backing_dev_info = NULL; 1541 return written ? written : err; 1542 } 1543 1544 /* 1545 * llseek. be sure to verify file size on SEEK_END. 1546 */ 1547 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) 1548 { 1549 struct inode *inode = file->f_mapping->host; 1550 loff_t i_size; 1551 loff_t ret; 1552 1553 inode_lock(inode); 1554 1555 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { 1556 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1557 if (ret < 0) 1558 goto out; 1559 } 1560 1561 i_size = i_size_read(inode); 1562 switch (whence) { 1563 case SEEK_END: 1564 offset += i_size; 1565 break; 1566 case SEEK_CUR: 1567 /* 1568 * Here we special-case the lseek(fd, 0, SEEK_CUR) 1569 * position-querying operation. Avoid rewriting the "same" 1570 * f_pos value back to the file because a concurrent read(), 1571 * write() or lseek() might have altered it 1572 */ 1573 if (offset == 0) { 1574 ret = file->f_pos; 1575 goto out; 1576 } 1577 offset += file->f_pos; 1578 break; 1579 case SEEK_DATA: 1580 if (offset < 0 || offset >= i_size) { 1581 ret = -ENXIO; 1582 goto out; 1583 } 1584 break; 1585 case SEEK_HOLE: 1586 if (offset < 0 || offset >= i_size) { 1587 ret = -ENXIO; 1588 goto out; 1589 } 1590 offset = i_size; 1591 break; 1592 } 1593 1594 ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 1595 1596 out: 1597 inode_unlock(inode); 1598 return ret; 1599 } 1600 1601 static inline void ceph_zero_partial_page( 1602 struct inode *inode, loff_t offset, unsigned size) 1603 { 1604 struct page *page; 1605 pgoff_t index = offset >> PAGE_SHIFT; 1606 1607 page = find_lock_page(inode->i_mapping, index); 1608 if (page) { 1609 wait_on_page_writeback(page); 1610 zero_user(page, offset & (PAGE_SIZE - 1), size); 1611 unlock_page(page); 1612 put_page(page); 1613 } 1614 } 1615 1616 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 1617 loff_t length) 1618 { 1619 loff_t nearly = round_up(offset, PAGE_SIZE); 1620 if (offset < nearly) { 1621 loff_t size = nearly - offset; 1622 if (length < size) 1623 size = length; 1624 ceph_zero_partial_page(inode, offset, size); 1625 offset += size; 1626 length -= size; 1627 } 1628 if (length >= PAGE_SIZE) { 1629 loff_t size = round_down(length, PAGE_SIZE); 1630 truncate_pagecache_range(inode, offset, offset + size - 1); 1631 offset += size; 1632 length -= size; 1633 } 1634 if (length) 1635 ceph_zero_partial_page(inode, offset, length); 1636 } 1637 1638 static int ceph_zero_partial_object(struct inode *inode, 1639 loff_t offset, loff_t *length) 1640 { 1641 struct ceph_inode_info *ci = ceph_inode(inode); 1642 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1643 struct ceph_osd_request *req; 1644 int ret = 0; 1645 loff_t zero = 0; 1646 int op; 1647 1648 if (!length) { 1649 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; 1650 length = &zero; 1651 } else { 1652 op = CEPH_OSD_OP_ZERO; 1653 } 1654 1655 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1656 ceph_vino(inode), 1657 offset, length, 1658 0, 1, op, 1659 CEPH_OSD_FLAG_WRITE, 1660 NULL, 0, 0, false); 1661 if (IS_ERR(req)) { 1662 ret = PTR_ERR(req); 1663 goto out; 1664 } 1665 1666 req->r_mtime = timespec64_to_timespec(inode->i_mtime); 1667 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1668 if (!ret) { 1669 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1670 if (ret == -ENOENT) 1671 ret = 0; 1672 } 1673 ceph_osdc_put_request(req); 1674 1675 out: 1676 return ret; 1677 } 1678 1679 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) 1680 { 1681 int ret = 0; 1682 struct ceph_inode_info *ci = ceph_inode(inode); 1683 s32 stripe_unit = ci->i_layout.stripe_unit; 1684 s32 stripe_count = ci->i_layout.stripe_count; 1685 s32 object_size = ci->i_layout.object_size; 1686 u64 object_set_size = object_size * stripe_count; 1687 u64 nearly, t; 1688 1689 /* round offset up to next period boundary */ 1690 nearly = offset + object_set_size - 1; 1691 t = nearly; 1692 nearly -= do_div(t, object_set_size); 1693 1694 while (length && offset < nearly) { 1695 loff_t size = length; 1696 ret = ceph_zero_partial_object(inode, offset, &size); 1697 if (ret < 0) 1698 return ret; 1699 offset += size; 1700 length -= size; 1701 } 1702 while (length >= object_set_size) { 1703 int i; 1704 loff_t pos = offset; 1705 for (i = 0; i < stripe_count; ++i) { 1706 ret = ceph_zero_partial_object(inode, pos, NULL); 1707 if (ret < 0) 1708 return ret; 1709 pos += stripe_unit; 1710 } 1711 offset += object_set_size; 1712 length -= object_set_size; 1713 } 1714 while (length) { 1715 loff_t size = length; 1716 ret = ceph_zero_partial_object(inode, offset, &size); 1717 if (ret < 0) 1718 return ret; 1719 offset += size; 1720 length -= size; 1721 } 1722 return ret; 1723 } 1724 1725 static long ceph_fallocate(struct file *file, int mode, 1726 loff_t offset, loff_t length) 1727 { 1728 struct ceph_file_info *fi = file->private_data; 1729 struct inode *inode = file_inode(file); 1730 struct ceph_inode_info *ci = ceph_inode(inode); 1731 struct ceph_osd_client *osdc = 1732 &ceph_inode_to_client(inode)->client->osdc; 1733 struct ceph_cap_flush *prealloc_cf; 1734 int want, got = 0; 1735 int dirty; 1736 int ret = 0; 1737 loff_t endoff = 0; 1738 loff_t size; 1739 1740 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 1741 return -EOPNOTSUPP; 1742 1743 if (!S_ISREG(inode->i_mode)) 1744 return -EOPNOTSUPP; 1745 1746 prealloc_cf = ceph_alloc_cap_flush(); 1747 if (!prealloc_cf) 1748 return -ENOMEM; 1749 1750 inode_lock(inode); 1751 1752 if (ceph_snap(inode) != CEPH_NOSNAP) { 1753 ret = -EROFS; 1754 goto unlock; 1755 } 1756 1757 if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) && 1758 ceph_quota_is_max_bytes_exceeded(inode, offset + length)) { 1759 ret = -EDQUOT; 1760 goto unlock; 1761 } 1762 1763 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && 1764 !(mode & FALLOC_FL_PUNCH_HOLE)) { 1765 ret = -ENOSPC; 1766 goto unlock; 1767 } 1768 1769 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1770 ret = ceph_uninline_data(file, NULL); 1771 if (ret < 0) 1772 goto unlock; 1773 } 1774 1775 size = i_size_read(inode); 1776 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 1777 endoff = offset + length; 1778 ret = inode_newsize_ok(inode, endoff); 1779 if (ret) 1780 goto unlock; 1781 } 1782 1783 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1784 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1785 else 1786 want = CEPH_CAP_FILE_BUFFER; 1787 1788 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL); 1789 if (ret < 0) 1790 goto unlock; 1791 1792 if (mode & FALLOC_FL_PUNCH_HOLE) { 1793 if (offset < size) 1794 ceph_zero_pagecache_range(inode, offset, length); 1795 ret = ceph_zero_objects(inode, offset, length); 1796 } else if (endoff > size) { 1797 truncate_pagecache_range(inode, size, -1); 1798 if (ceph_inode_set_size(inode, endoff)) 1799 ceph_check_caps(ceph_inode(inode), 1800 CHECK_CAPS_AUTHONLY, NULL); 1801 } 1802 1803 if (!ret) { 1804 spin_lock(&ci->i_ceph_lock); 1805 ci->i_inline_version = CEPH_INLINE_NONE; 1806 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1807 &prealloc_cf); 1808 spin_unlock(&ci->i_ceph_lock); 1809 if (dirty) 1810 __mark_inode_dirty(inode, dirty); 1811 if ((endoff > size) && 1812 ceph_quota_is_max_bytes_approaching(inode, endoff)) 1813 ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL); 1814 } 1815 1816 ceph_put_cap_refs(ci, got); 1817 unlock: 1818 inode_unlock(inode); 1819 ceph_free_cap_flush(prealloc_cf); 1820 return ret; 1821 } 1822 1823 const struct file_operations ceph_file_fops = { 1824 .open = ceph_open, 1825 .release = ceph_release, 1826 .llseek = ceph_llseek, 1827 .read_iter = ceph_read_iter, 1828 .write_iter = ceph_write_iter, 1829 .mmap = ceph_mmap, 1830 .fsync = ceph_fsync, 1831 .lock = ceph_lock, 1832 .flock = ceph_flock, 1833 .splice_read = generic_file_splice_read, 1834 .splice_write = iter_file_splice_write, 1835 .unlocked_ioctl = ceph_ioctl, 1836 .compat_ioctl = ceph_ioctl, 1837 .fallocate = ceph_fallocate, 1838 }; 1839 1840