1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/module.h> 5 #include <linux/sched.h> 6 #include <linux/slab.h> 7 #include <linux/file.h> 8 #include <linux/mount.h> 9 #include <linux/namei.h> 10 #include <linux/writeback.h> 11 #include <linux/falloc.h> 12 13 #include "super.h" 14 #include "mds_client.h" 15 #include "cache.h" 16 17 static __le32 ceph_flags_sys2wire(u32 flags) 18 { 19 u32 wire_flags = 0; 20 21 switch (flags & O_ACCMODE) { 22 case O_RDONLY: 23 wire_flags |= CEPH_O_RDONLY; 24 break; 25 case O_WRONLY: 26 wire_flags |= CEPH_O_WRONLY; 27 break; 28 case O_RDWR: 29 wire_flags |= CEPH_O_RDWR; 30 break; 31 } 32 33 flags &= ~O_ACCMODE; 34 35 #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; } 36 37 ceph_sys2wire(O_CREAT); 38 ceph_sys2wire(O_EXCL); 39 ceph_sys2wire(O_TRUNC); 40 ceph_sys2wire(O_DIRECTORY); 41 ceph_sys2wire(O_NOFOLLOW); 42 43 #undef ceph_sys2wire 44 45 if (flags) 46 dout("unused open flags: %x\n", flags); 47 48 return cpu_to_le32(wire_flags); 49 } 50 51 /* 52 * Ceph file operations 53 * 54 * Implement basic open/close functionality, and implement 55 * read/write. 56 * 57 * We implement three modes of file I/O: 58 * - buffered uses the generic_file_aio_{read,write} helpers 59 * 60 * - synchronous is used when there is multi-client read/write 61 * sharing, avoids the page cache, and synchronously waits for an 62 * ack from the OSD. 63 * 64 * - direct io takes the variant of the sync path that references 65 * user pages directly. 66 * 67 * fsync() flushes and waits on dirty pages, but just queues metadata 68 * for writeback: since the MDS can recover size and mtime there is no 69 * need to wait for MDS acknowledgement. 70 */ 71 72 /* 73 * Calculate the length sum of direct io vectors that can 74 * be combined into one page vector. 75 */ 76 static size_t dio_get_pagev_size(const struct iov_iter *it) 77 { 78 const struct iovec *iov = it->iov; 79 const struct iovec *iovend = iov + it->nr_segs; 80 size_t size; 81 82 size = iov->iov_len - it->iov_offset; 83 /* 84 * An iov can be page vectored when both the current tail 85 * and the next base are page aligned. 86 */ 87 while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && 88 (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { 89 size += iov->iov_len; 90 } 91 dout("dio_get_pagevlen len = %zu\n", size); 92 return size; 93 } 94 95 /* 96 * Allocate a page vector based on (@it, @nbytes). 97 * The return value is the tuple describing a page vector, 98 * that is (@pages, @page_align, @num_pages). 99 */ 100 static struct page ** 101 dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, 102 size_t *page_align, int *num_pages) 103 { 104 struct iov_iter tmp_it = *it; 105 size_t align; 106 struct page **pages; 107 int ret = 0, idx, npages; 108 109 align = (unsigned long)(it->iov->iov_base + it->iov_offset) & 110 (PAGE_SIZE - 1); 111 npages = calc_pages_for(align, nbytes); 112 pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL); 113 if (!pages) 114 return ERR_PTR(-ENOMEM); 115 116 for (idx = 0; idx < npages; ) { 117 size_t start; 118 ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, 119 npages - idx, &start); 120 if (ret < 0) 121 goto fail; 122 123 iov_iter_advance(&tmp_it, ret); 124 nbytes -= ret; 125 idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; 126 } 127 128 BUG_ON(nbytes != 0); 129 *num_pages = npages; 130 *page_align = align; 131 dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); 132 return pages; 133 fail: 134 ceph_put_page_vector(pages, idx, false); 135 return ERR_PTR(ret); 136 } 137 138 /* 139 * Prepare an open request. Preallocate ceph_cap to avoid an 140 * inopportune ENOMEM later. 141 */ 142 static struct ceph_mds_request * 143 prepare_open_request(struct super_block *sb, int flags, int create_mode) 144 { 145 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 146 struct ceph_mds_client *mdsc = fsc->mdsc; 147 struct ceph_mds_request *req; 148 int want_auth = USE_ANY_MDS; 149 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 150 151 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) 152 want_auth = USE_AUTH_MDS; 153 154 req = ceph_mdsc_create_request(mdsc, op, want_auth); 155 if (IS_ERR(req)) 156 goto out; 157 req->r_fmode = ceph_flags_to_mode(flags); 158 req->r_args.open.flags = ceph_flags_sys2wire(flags); 159 req->r_args.open.mode = cpu_to_le32(create_mode); 160 out: 161 return req; 162 } 163 164 static int ceph_init_file_info(struct inode *inode, struct file *file, 165 int fmode, bool isdir) 166 { 167 struct ceph_file_info *fi; 168 169 dout("%s %p %p 0%o (%s)\n", __func__, inode, file, 170 inode->i_mode, isdir ? "dir" : "regular"); 171 BUG_ON(inode->i_fop->release != ceph_release); 172 173 if (isdir) { 174 struct ceph_dir_file_info *dfi = 175 kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL); 176 if (!dfi) { 177 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 178 return -ENOMEM; 179 } 180 181 file->private_data = dfi; 182 fi = &dfi->file_info; 183 dfi->next_offset = 2; 184 dfi->readdir_cache_idx = -1; 185 } else { 186 fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); 187 if (!fi) { 188 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 189 return -ENOMEM; 190 } 191 192 file->private_data = fi; 193 } 194 195 fi->fmode = fmode; 196 spin_lock_init(&fi->rw_contexts_lock); 197 INIT_LIST_HEAD(&fi->rw_contexts); 198 199 return 0; 200 } 201 202 /* 203 * initialize private struct file data. 204 * if we fail, clean up by dropping fmode reference on the ceph_inode 205 */ 206 static int ceph_init_file(struct inode *inode, struct file *file, int fmode) 207 { 208 int ret = 0; 209 210 switch (inode->i_mode & S_IFMT) { 211 case S_IFREG: 212 ceph_fscache_register_inode_cookie(inode); 213 ceph_fscache_file_set_cookie(inode, file); 214 case S_IFDIR: 215 ret = ceph_init_file_info(inode, file, fmode, 216 S_ISDIR(inode->i_mode)); 217 if (ret) 218 return ret; 219 break; 220 221 case S_IFLNK: 222 dout("init_file %p %p 0%o (symlink)\n", inode, file, 223 inode->i_mode); 224 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 225 break; 226 227 default: 228 dout("init_file %p %p 0%o (special)\n", inode, file, 229 inode->i_mode); 230 /* 231 * we need to drop the open ref now, since we don't 232 * have .release set to ceph_release. 233 */ 234 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ 235 BUG_ON(inode->i_fop->release == ceph_release); 236 237 /* call the proper open fop */ 238 ret = inode->i_fop->open(inode, file); 239 } 240 return ret; 241 } 242 243 /* 244 * try renew caps after session gets killed. 245 */ 246 int ceph_renew_caps(struct inode *inode) 247 { 248 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 249 struct ceph_inode_info *ci = ceph_inode(inode); 250 struct ceph_mds_request *req; 251 int err, flags, wanted; 252 253 spin_lock(&ci->i_ceph_lock); 254 wanted = __ceph_caps_file_wanted(ci); 255 if (__ceph_is_any_real_caps(ci) && 256 (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) { 257 int issued = __ceph_caps_issued(ci, NULL); 258 spin_unlock(&ci->i_ceph_lock); 259 dout("renew caps %p want %s issued %s updating mds_wanted\n", 260 inode, ceph_cap_string(wanted), ceph_cap_string(issued)); 261 ceph_check_caps(ci, 0, NULL); 262 return 0; 263 } 264 spin_unlock(&ci->i_ceph_lock); 265 266 flags = 0; 267 if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR)) 268 flags = O_RDWR; 269 else if (wanted & CEPH_CAP_FILE_RD) 270 flags = O_RDONLY; 271 else if (wanted & CEPH_CAP_FILE_WR) 272 flags = O_WRONLY; 273 #ifdef O_LAZY 274 if (wanted & CEPH_CAP_FILE_LAZYIO) 275 flags |= O_LAZY; 276 #endif 277 278 req = prepare_open_request(inode->i_sb, flags, 0); 279 if (IS_ERR(req)) { 280 err = PTR_ERR(req); 281 goto out; 282 } 283 284 req->r_inode = inode; 285 ihold(inode); 286 req->r_num_caps = 1; 287 req->r_fmode = -1; 288 289 err = ceph_mdsc_do_request(mdsc, NULL, req); 290 ceph_mdsc_put_request(req); 291 out: 292 dout("renew caps %p open result=%d\n", inode, err); 293 return err < 0 ? err : 0; 294 } 295 296 /* 297 * If we already have the requisite capabilities, we can satisfy 298 * the open request locally (no need to request new caps from the 299 * MDS). We do, however, need to inform the MDS (asynchronously) 300 * if our wanted caps set expands. 301 */ 302 int ceph_open(struct inode *inode, struct file *file) 303 { 304 struct ceph_inode_info *ci = ceph_inode(inode); 305 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 306 struct ceph_mds_client *mdsc = fsc->mdsc; 307 struct ceph_mds_request *req; 308 struct ceph_file_info *fi = file->private_data; 309 int err; 310 int flags, fmode, wanted; 311 312 if (fi) { 313 dout("open file %p is already opened\n", file); 314 return 0; 315 } 316 317 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */ 318 flags = file->f_flags & ~(O_CREAT|O_EXCL); 319 if (S_ISDIR(inode->i_mode)) 320 flags = O_DIRECTORY; /* mds likes to know */ 321 322 dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, 323 ceph_vinop(inode), file, flags, file->f_flags); 324 fmode = ceph_flags_to_mode(flags); 325 wanted = ceph_caps_for_mode(fmode); 326 327 /* snapped files are read-only */ 328 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) 329 return -EROFS; 330 331 /* trivially open snapdir */ 332 if (ceph_snap(inode) == CEPH_SNAPDIR) { 333 spin_lock(&ci->i_ceph_lock); 334 __ceph_get_fmode(ci, fmode); 335 spin_unlock(&ci->i_ceph_lock); 336 return ceph_init_file(inode, file, fmode); 337 } 338 339 /* 340 * No need to block if we have caps on the auth MDS (for 341 * write) or any MDS (for read). Update wanted set 342 * asynchronously. 343 */ 344 spin_lock(&ci->i_ceph_lock); 345 if (__ceph_is_any_real_caps(ci) && 346 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) { 347 int mds_wanted = __ceph_caps_mds_wanted(ci, true); 348 int issued = __ceph_caps_issued(ci, NULL); 349 350 dout("open %p fmode %d want %s issued %s using existing\n", 351 inode, fmode, ceph_cap_string(wanted), 352 ceph_cap_string(issued)); 353 __ceph_get_fmode(ci, fmode); 354 spin_unlock(&ci->i_ceph_lock); 355 356 /* adjust wanted? */ 357 if ((issued & wanted) != wanted && 358 (mds_wanted & wanted) != wanted && 359 ceph_snap(inode) != CEPH_SNAPDIR) 360 ceph_check_caps(ci, 0, NULL); 361 362 return ceph_init_file(inode, file, fmode); 363 } else if (ceph_snap(inode) != CEPH_NOSNAP && 364 (ci->i_snap_caps & wanted) == wanted) { 365 __ceph_get_fmode(ci, fmode); 366 spin_unlock(&ci->i_ceph_lock); 367 return ceph_init_file(inode, file, fmode); 368 } 369 370 spin_unlock(&ci->i_ceph_lock); 371 372 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 373 req = prepare_open_request(inode->i_sb, flags, 0); 374 if (IS_ERR(req)) { 375 err = PTR_ERR(req); 376 goto out; 377 } 378 req->r_inode = inode; 379 ihold(inode); 380 381 req->r_num_caps = 1; 382 err = ceph_mdsc_do_request(mdsc, NULL, req); 383 if (!err) 384 err = ceph_init_file(inode, file, req->r_fmode); 385 ceph_mdsc_put_request(req); 386 dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 387 out: 388 return err; 389 } 390 391 392 /* 393 * Do a lookup + open with a single request. If we get a non-existent 394 * file or symlink, return 1 so the VFS can retry. 395 */ 396 int ceph_atomic_open(struct inode *dir, struct dentry *dentry, 397 struct file *file, unsigned flags, umode_t mode, 398 int *opened) 399 { 400 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 401 struct ceph_mds_client *mdsc = fsc->mdsc; 402 struct ceph_mds_request *req; 403 struct dentry *dn; 404 struct ceph_acls_info acls = {}; 405 int mask; 406 int err; 407 408 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", 409 dir, dentry, dentry, 410 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); 411 412 if (dentry->d_name.len > NAME_MAX) 413 return -ENAMETOOLONG; 414 415 if (flags & O_CREAT) { 416 if (ceph_quota_is_max_files_exceeded(dir)) 417 return -EDQUOT; 418 err = ceph_pre_init_acls(dir, &mode, &acls); 419 if (err < 0) 420 return err; 421 } 422 423 /* do the open */ 424 req = prepare_open_request(dir->i_sb, flags, mode); 425 if (IS_ERR(req)) { 426 err = PTR_ERR(req); 427 goto out_acl; 428 } 429 req->r_dentry = dget(dentry); 430 req->r_num_caps = 2; 431 if (flags & O_CREAT) { 432 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL; 433 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 434 if (acls.pagelist) { 435 req->r_pagelist = acls.pagelist; 436 acls.pagelist = NULL; 437 } 438 } 439 440 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 441 if (ceph_security_xattr_wanted(dir)) 442 mask |= CEPH_CAP_XATTR_SHARED; 443 req->r_args.open.mask = cpu_to_le32(mask); 444 445 req->r_parent = dir; 446 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); 447 err = ceph_mdsc_do_request(mdsc, 448 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, 449 req); 450 err = ceph_handle_snapdir(req, dentry, err); 451 if (err) 452 goto out_req; 453 454 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) 455 err = ceph_handle_notrace_create(dir, dentry); 456 457 if (d_in_lookup(dentry)) { 458 dn = ceph_finish_lookup(req, dentry, err); 459 if (IS_ERR(dn)) 460 err = PTR_ERR(dn); 461 } else { 462 /* we were given a hashed negative dentry */ 463 dn = NULL; 464 } 465 if (err) 466 goto out_req; 467 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { 468 /* make vfs retry on splice, ENOENT, or symlink */ 469 dout("atomic_open finish_no_open on dn %p\n", dn); 470 err = finish_no_open(file, dn); 471 } else { 472 dout("atomic_open finish_open on dn %p\n", dn); 473 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 474 ceph_init_inode_acls(d_inode(dentry), &acls); 475 *opened |= FILE_CREATED; 476 } 477 err = finish_open(file, dentry, ceph_open, opened); 478 } 479 out_req: 480 if (!req->r_err && req->r_target_inode) 481 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); 482 ceph_mdsc_put_request(req); 483 out_acl: 484 ceph_release_acls_info(&acls); 485 dout("atomic_open result=%d\n", err); 486 return err; 487 } 488 489 int ceph_release(struct inode *inode, struct file *file) 490 { 491 struct ceph_inode_info *ci = ceph_inode(inode); 492 493 if (S_ISDIR(inode->i_mode)) { 494 struct ceph_dir_file_info *dfi = file->private_data; 495 dout("release inode %p dir file %p\n", inode, file); 496 WARN_ON(!list_empty(&dfi->file_info.rw_contexts)); 497 498 ceph_put_fmode(ci, dfi->file_info.fmode); 499 500 if (dfi->last_readdir) 501 ceph_mdsc_put_request(dfi->last_readdir); 502 kfree(dfi->last_name); 503 kfree(dfi->dir_info); 504 kmem_cache_free(ceph_dir_file_cachep, dfi); 505 } else { 506 struct ceph_file_info *fi = file->private_data; 507 dout("release inode %p regular file %p\n", inode, file); 508 WARN_ON(!list_empty(&fi->rw_contexts)); 509 510 ceph_put_fmode(ci, fi->fmode); 511 kmem_cache_free(ceph_file_cachep, fi); 512 } 513 514 /* wake up anyone waiting for caps on this inode */ 515 wake_up_all(&ci->i_cap_wq); 516 return 0; 517 } 518 519 enum { 520 HAVE_RETRIED = 1, 521 CHECK_EOF = 2, 522 READ_INLINE = 3, 523 }; 524 525 /* 526 * Read a range of bytes striped over one or more objects. Iterate over 527 * objects we stripe over. (That's not atomic, but good enough for now.) 528 * 529 * If we get a short result from the OSD, check against i_size; we need to 530 * only return a short read to the caller if we hit EOF. 531 */ 532 static int striped_read(struct inode *inode, 533 u64 pos, u64 len, 534 struct page **pages, int num_pages, 535 int page_align, int *checkeof) 536 { 537 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 538 struct ceph_inode_info *ci = ceph_inode(inode); 539 u64 this_len; 540 loff_t i_size; 541 int page_idx; 542 int ret, read = 0; 543 bool hit_stripe, was_short; 544 545 /* 546 * we may need to do multiple reads. not atomic, unfortunately. 547 */ 548 more: 549 this_len = len; 550 page_idx = (page_align + read) >> PAGE_SHIFT; 551 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), 552 &ci->i_layout, pos, &this_len, 553 ci->i_truncate_seq, ci->i_truncate_size, 554 pages + page_idx, num_pages - page_idx, 555 ((page_align + read) & ~PAGE_MASK)); 556 if (ret == -ENOENT) 557 ret = 0; 558 hit_stripe = this_len < len; 559 was_short = ret >= 0 && ret < this_len; 560 dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read, 561 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); 562 563 i_size = i_size_read(inode); 564 if (ret >= 0) { 565 if (was_short && (pos + ret < i_size)) { 566 int zlen = min(this_len - ret, i_size - pos - ret); 567 int zoff = page_align + read + ret; 568 dout(" zero gap %llu to %llu\n", 569 pos + ret, pos + ret + zlen); 570 ceph_zero_page_vector_range(zoff, zlen, pages); 571 ret += zlen; 572 } 573 574 read += ret; 575 pos += ret; 576 len -= ret; 577 578 /* hit stripe and need continue*/ 579 if (len && hit_stripe && pos < i_size) 580 goto more; 581 } 582 583 if (read > 0) { 584 ret = read; 585 /* did we bounce off eof? */ 586 if (pos + len > i_size) 587 *checkeof = CHECK_EOF; 588 } 589 590 dout("striped_read returns %d\n", ret); 591 return ret; 592 } 593 594 /* 595 * Completely synchronous read and write methods. Direct from __user 596 * buffer to osd, or directly to user pages (if O_DIRECT). 597 * 598 * If the read spans object boundary, just do multiple reads. 599 */ 600 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to, 601 int *checkeof) 602 { 603 struct file *file = iocb->ki_filp; 604 struct inode *inode = file_inode(file); 605 struct page **pages; 606 u64 off = iocb->ki_pos; 607 int num_pages; 608 ssize_t ret; 609 size_t len = iov_iter_count(to); 610 611 dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len, 612 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 613 614 if (!len) 615 return 0; 616 /* 617 * flush any page cache pages in this range. this 618 * will make concurrent normal and sync io slow, 619 * but it will at least behave sensibly when they are 620 * in sequence. 621 */ 622 ret = filemap_write_and_wait_range(inode->i_mapping, off, 623 off + len); 624 if (ret < 0) 625 return ret; 626 627 if (unlikely(to->type & ITER_PIPE)) { 628 size_t page_off; 629 ret = iov_iter_get_pages_alloc(to, &pages, len, 630 &page_off); 631 if (ret <= 0) 632 return -ENOMEM; 633 num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE); 634 635 ret = striped_read(inode, off, ret, pages, num_pages, 636 page_off, checkeof); 637 if (ret > 0) { 638 iov_iter_advance(to, ret); 639 off += ret; 640 } else { 641 iov_iter_advance(to, 0); 642 } 643 ceph_put_page_vector(pages, num_pages, false); 644 } else { 645 num_pages = calc_pages_for(off, len); 646 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 647 if (IS_ERR(pages)) 648 return PTR_ERR(pages); 649 650 ret = striped_read(inode, off, len, pages, num_pages, 651 (off & ~PAGE_MASK), checkeof); 652 if (ret > 0) { 653 int l, k = 0; 654 size_t left = ret; 655 656 while (left) { 657 size_t page_off = off & ~PAGE_MASK; 658 size_t copy = min_t(size_t, left, 659 PAGE_SIZE - page_off); 660 l = copy_page_to_iter(pages[k++], page_off, 661 copy, to); 662 off += l; 663 left -= l; 664 if (l < copy) 665 break; 666 } 667 } 668 ceph_release_page_vector(pages, num_pages); 669 } 670 671 if (off > iocb->ki_pos) { 672 ret = off - iocb->ki_pos; 673 iocb->ki_pos = off; 674 } 675 676 dout("sync_read result %zd\n", ret); 677 return ret; 678 } 679 680 struct ceph_aio_request { 681 struct kiocb *iocb; 682 size_t total_len; 683 bool write; 684 bool should_dirty; 685 int error; 686 struct list_head osd_reqs; 687 unsigned num_reqs; 688 atomic_t pending_reqs; 689 struct timespec mtime; 690 struct ceph_cap_flush *prealloc_cf; 691 }; 692 693 struct ceph_aio_work { 694 struct work_struct work; 695 struct ceph_osd_request *req; 696 }; 697 698 static void ceph_aio_retry_work(struct work_struct *work); 699 700 static void ceph_aio_complete(struct inode *inode, 701 struct ceph_aio_request *aio_req) 702 { 703 struct ceph_inode_info *ci = ceph_inode(inode); 704 int ret; 705 706 if (!atomic_dec_and_test(&aio_req->pending_reqs)) 707 return; 708 709 ret = aio_req->error; 710 if (!ret) 711 ret = aio_req->total_len; 712 713 dout("ceph_aio_complete %p rc %d\n", inode, ret); 714 715 if (ret >= 0 && aio_req->write) { 716 int dirty; 717 718 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len; 719 if (endoff > i_size_read(inode)) { 720 if (ceph_inode_set_size(inode, endoff)) 721 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 722 } 723 724 spin_lock(&ci->i_ceph_lock); 725 ci->i_inline_version = CEPH_INLINE_NONE; 726 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 727 &aio_req->prealloc_cf); 728 spin_unlock(&ci->i_ceph_lock); 729 if (dirty) 730 __mark_inode_dirty(inode, dirty); 731 732 } 733 734 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : 735 CEPH_CAP_FILE_RD)); 736 737 aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); 738 739 ceph_free_cap_flush(aio_req->prealloc_cf); 740 kfree(aio_req); 741 } 742 743 static void ceph_aio_complete_req(struct ceph_osd_request *req) 744 { 745 int rc = req->r_result; 746 struct inode *inode = req->r_inode; 747 struct ceph_aio_request *aio_req = req->r_priv; 748 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 749 int num_pages = calc_pages_for((u64)osd_data->alignment, 750 osd_data->length); 751 752 dout("ceph_aio_complete_req %p rc %d bytes %llu\n", 753 inode, rc, osd_data->length); 754 755 if (rc == -EOLDSNAPC) { 756 struct ceph_aio_work *aio_work; 757 BUG_ON(!aio_req->write); 758 759 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS); 760 if (aio_work) { 761 INIT_WORK(&aio_work->work, ceph_aio_retry_work); 762 aio_work->req = req; 763 queue_work(ceph_inode_to_client(inode)->wb_wq, 764 &aio_work->work); 765 return; 766 } 767 rc = -ENOMEM; 768 } else if (!aio_req->write) { 769 if (rc == -ENOENT) 770 rc = 0; 771 if (rc >= 0 && osd_data->length > rc) { 772 int zoff = osd_data->alignment + rc; 773 int zlen = osd_data->length - rc; 774 /* 775 * If read is satisfied by single OSD request, 776 * it can pass EOF. Otherwise read is within 777 * i_size. 778 */ 779 if (aio_req->num_reqs == 1) { 780 loff_t i_size = i_size_read(inode); 781 loff_t endoff = aio_req->iocb->ki_pos + rc; 782 if (endoff < i_size) 783 zlen = min_t(size_t, zlen, 784 i_size - endoff); 785 aio_req->total_len = rc + zlen; 786 } 787 788 if (zlen > 0) 789 ceph_zero_page_vector_range(zoff, zlen, 790 osd_data->pages); 791 } 792 } 793 794 ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty); 795 ceph_osdc_put_request(req); 796 797 if (rc < 0) 798 cmpxchg(&aio_req->error, 0, rc); 799 800 ceph_aio_complete(inode, aio_req); 801 return; 802 } 803 804 static void ceph_aio_retry_work(struct work_struct *work) 805 { 806 struct ceph_aio_work *aio_work = 807 container_of(work, struct ceph_aio_work, work); 808 struct ceph_osd_request *orig_req = aio_work->req; 809 struct ceph_aio_request *aio_req = orig_req->r_priv; 810 struct inode *inode = orig_req->r_inode; 811 struct ceph_inode_info *ci = ceph_inode(inode); 812 struct ceph_snap_context *snapc; 813 struct ceph_osd_request *req; 814 int ret; 815 816 spin_lock(&ci->i_ceph_lock); 817 if (__ceph_have_pending_cap_snap(ci)) { 818 struct ceph_cap_snap *capsnap = 819 list_last_entry(&ci->i_cap_snaps, 820 struct ceph_cap_snap, 821 ci_item); 822 snapc = ceph_get_snap_context(capsnap->context); 823 } else { 824 BUG_ON(!ci->i_head_snapc); 825 snapc = ceph_get_snap_context(ci->i_head_snapc); 826 } 827 spin_unlock(&ci->i_ceph_lock); 828 829 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, 830 false, GFP_NOFS); 831 if (!req) { 832 ret = -ENOMEM; 833 req = orig_req; 834 goto out; 835 } 836 837 req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 838 ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc); 839 ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid); 840 841 ret = ceph_osdc_alloc_messages(req, GFP_NOFS); 842 if (ret) { 843 ceph_osdc_put_request(req); 844 req = orig_req; 845 goto out; 846 } 847 848 req->r_ops[0] = orig_req->r_ops[0]; 849 850 req->r_mtime = aio_req->mtime; 851 req->r_data_offset = req->r_ops[0].extent.offset; 852 853 ceph_osdc_put_request(orig_req); 854 855 req->r_callback = ceph_aio_complete_req; 856 req->r_inode = inode; 857 req->r_priv = aio_req; 858 req->r_abort_on_full = true; 859 860 ret = ceph_osdc_start_request(req->r_osdc, req, false); 861 out: 862 if (ret < 0) { 863 req->r_result = ret; 864 ceph_aio_complete_req(req); 865 } 866 867 ceph_put_snap_context(snapc); 868 kfree(aio_work); 869 } 870 871 static ssize_t 872 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, 873 struct ceph_snap_context *snapc, 874 struct ceph_cap_flush **pcf) 875 { 876 struct file *file = iocb->ki_filp; 877 struct inode *inode = file_inode(file); 878 struct ceph_inode_info *ci = ceph_inode(inode); 879 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 880 struct ceph_vino vino; 881 struct ceph_osd_request *req; 882 struct page **pages; 883 struct ceph_aio_request *aio_req = NULL; 884 int num_pages = 0; 885 int flags; 886 int ret; 887 struct timespec mtime = current_time(inode); 888 size_t count = iov_iter_count(iter); 889 loff_t pos = iocb->ki_pos; 890 bool write = iov_iter_rw(iter) == WRITE; 891 bool should_dirty = !write && iter_is_iovec(iter); 892 893 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 894 return -EROFS; 895 896 dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n", 897 (write ? "write" : "read"), file, pos, (unsigned)count, 898 snapc, snapc->seq); 899 900 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 901 if (ret < 0) 902 return ret; 903 904 if (write) { 905 int ret2 = invalidate_inode_pages2_range(inode->i_mapping, 906 pos >> PAGE_SHIFT, 907 (pos + count) >> PAGE_SHIFT); 908 if (ret2 < 0) 909 dout("invalidate_inode_pages2_range returned %d\n", ret2); 910 911 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 912 } else { 913 flags = CEPH_OSD_FLAG_READ; 914 } 915 916 while (iov_iter_count(iter) > 0) { 917 u64 size = dio_get_pagev_size(iter); 918 size_t start = 0; 919 ssize_t len; 920 921 vino = ceph_vino(inode); 922 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 923 vino, pos, &size, 0, 924 1, 925 write ? CEPH_OSD_OP_WRITE : 926 CEPH_OSD_OP_READ, 927 flags, snapc, 928 ci->i_truncate_seq, 929 ci->i_truncate_size, 930 false); 931 if (IS_ERR(req)) { 932 ret = PTR_ERR(req); 933 break; 934 } 935 936 if (write) 937 size = min_t(u64, size, fsc->mount_options->wsize); 938 else 939 size = min_t(u64, size, fsc->mount_options->rsize); 940 941 len = size; 942 pages = dio_get_pages_alloc(iter, len, &start, &num_pages); 943 if (IS_ERR(pages)) { 944 ceph_osdc_put_request(req); 945 ret = PTR_ERR(pages); 946 break; 947 } 948 949 /* 950 * To simplify error handling, allow AIO when IO within i_size 951 * or IO can be satisfied by single OSD request. 952 */ 953 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) && 954 (len == count || pos + count <= i_size_read(inode))) { 955 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL); 956 if (aio_req) { 957 aio_req->iocb = iocb; 958 aio_req->write = write; 959 aio_req->should_dirty = should_dirty; 960 INIT_LIST_HEAD(&aio_req->osd_reqs); 961 if (write) { 962 aio_req->mtime = mtime; 963 swap(aio_req->prealloc_cf, *pcf); 964 } 965 } 966 /* ignore error */ 967 } 968 969 if (write) { 970 /* 971 * throw out any page cache pages in this range. this 972 * may block. 973 */ 974 truncate_inode_pages_range(inode->i_mapping, pos, 975 (pos+len) | (PAGE_SIZE - 1)); 976 977 req->r_mtime = mtime; 978 } 979 980 osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, 981 false, false); 982 983 if (aio_req) { 984 aio_req->total_len += len; 985 aio_req->num_reqs++; 986 atomic_inc(&aio_req->pending_reqs); 987 988 req->r_callback = ceph_aio_complete_req; 989 req->r_inode = inode; 990 req->r_priv = aio_req; 991 list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); 992 993 pos += len; 994 iov_iter_advance(iter, len); 995 continue; 996 } 997 998 ret = ceph_osdc_start_request(req->r_osdc, req, false); 999 if (!ret) 1000 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1001 1002 size = i_size_read(inode); 1003 if (!write) { 1004 if (ret == -ENOENT) 1005 ret = 0; 1006 if (ret >= 0 && ret < len && pos + ret < size) { 1007 int zlen = min_t(size_t, len - ret, 1008 size - pos - ret); 1009 ceph_zero_page_vector_range(start + ret, zlen, 1010 pages); 1011 ret += zlen; 1012 } 1013 if (ret >= 0) 1014 len = ret; 1015 } 1016 1017 ceph_put_page_vector(pages, num_pages, should_dirty); 1018 1019 ceph_osdc_put_request(req); 1020 if (ret < 0) 1021 break; 1022 1023 pos += len; 1024 iov_iter_advance(iter, len); 1025 1026 if (!write && pos >= size) 1027 break; 1028 1029 if (write && pos > size) { 1030 if (ceph_inode_set_size(inode, pos)) 1031 ceph_check_caps(ceph_inode(inode), 1032 CHECK_CAPS_AUTHONLY, 1033 NULL); 1034 } 1035 } 1036 1037 if (aio_req) { 1038 LIST_HEAD(osd_reqs); 1039 1040 if (aio_req->num_reqs == 0) { 1041 kfree(aio_req); 1042 return ret; 1043 } 1044 1045 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR : 1046 CEPH_CAP_FILE_RD); 1047 1048 list_splice(&aio_req->osd_reqs, &osd_reqs); 1049 while (!list_empty(&osd_reqs)) { 1050 req = list_first_entry(&osd_reqs, 1051 struct ceph_osd_request, 1052 r_unsafe_item); 1053 list_del_init(&req->r_unsafe_item); 1054 if (ret >= 0) 1055 ret = ceph_osdc_start_request(req->r_osdc, 1056 req, false); 1057 if (ret < 0) { 1058 req->r_result = ret; 1059 ceph_aio_complete_req(req); 1060 } 1061 } 1062 return -EIOCBQUEUED; 1063 } 1064 1065 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) { 1066 ret = pos - iocb->ki_pos; 1067 iocb->ki_pos = pos; 1068 } 1069 return ret; 1070 } 1071 1072 /* 1073 * Synchronous write, straight from __user pointer or user pages. 1074 * 1075 * If write spans object boundary, just do multiple writes. (For a 1076 * correct atomic write, we should e.g. take write locks on all 1077 * objects, rollback on failure, etc.) 1078 */ 1079 static ssize_t 1080 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, 1081 struct ceph_snap_context *snapc) 1082 { 1083 struct file *file = iocb->ki_filp; 1084 struct inode *inode = file_inode(file); 1085 struct ceph_inode_info *ci = ceph_inode(inode); 1086 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1087 struct ceph_vino vino; 1088 struct ceph_osd_request *req; 1089 struct page **pages; 1090 u64 len; 1091 int num_pages; 1092 int written = 0; 1093 int flags; 1094 int ret; 1095 bool check_caps = false; 1096 struct timespec mtime = current_time(inode); 1097 size_t count = iov_iter_count(from); 1098 1099 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1100 return -EROFS; 1101 1102 dout("sync_write on file %p %lld~%u snapc %p seq %lld\n", 1103 file, pos, (unsigned)count, snapc, snapc->seq); 1104 1105 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count); 1106 if (ret < 0) 1107 return ret; 1108 1109 ret = invalidate_inode_pages2_range(inode->i_mapping, 1110 pos >> PAGE_SHIFT, 1111 (pos + count) >> PAGE_SHIFT); 1112 if (ret < 0) 1113 dout("invalidate_inode_pages2_range returned %d\n", ret); 1114 1115 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 1116 1117 while ((len = iov_iter_count(from)) > 0) { 1118 size_t left; 1119 int n; 1120 1121 vino = ceph_vino(inode); 1122 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1123 vino, pos, &len, 0, 1, 1124 CEPH_OSD_OP_WRITE, flags, snapc, 1125 ci->i_truncate_seq, 1126 ci->i_truncate_size, 1127 false); 1128 if (IS_ERR(req)) { 1129 ret = PTR_ERR(req); 1130 break; 1131 } 1132 1133 /* 1134 * write from beginning of first page, 1135 * regardless of io alignment 1136 */ 1137 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1138 1139 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1140 if (IS_ERR(pages)) { 1141 ret = PTR_ERR(pages); 1142 goto out; 1143 } 1144 1145 left = len; 1146 for (n = 0; n < num_pages; n++) { 1147 size_t plen = min_t(size_t, left, PAGE_SIZE); 1148 ret = copy_page_from_iter(pages[n], 0, plen, from); 1149 if (ret != plen) { 1150 ret = -EFAULT; 1151 break; 1152 } 1153 left -= ret; 1154 } 1155 1156 if (ret < 0) { 1157 ceph_release_page_vector(pages, num_pages); 1158 goto out; 1159 } 1160 1161 req->r_inode = inode; 1162 1163 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, 1164 false, true); 1165 1166 req->r_mtime = mtime; 1167 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1168 if (!ret) 1169 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1170 1171 out: 1172 ceph_osdc_put_request(req); 1173 if (ret != 0) { 1174 ceph_set_error_write(ci); 1175 break; 1176 } 1177 1178 ceph_clear_error_write(ci); 1179 pos += len; 1180 written += len; 1181 if (pos > i_size_read(inode)) { 1182 check_caps = ceph_inode_set_size(inode, pos); 1183 if (check_caps) 1184 ceph_check_caps(ceph_inode(inode), 1185 CHECK_CAPS_AUTHONLY, 1186 NULL); 1187 } 1188 1189 } 1190 1191 if (ret != -EOLDSNAPC && written > 0) { 1192 ret = written; 1193 iocb->ki_pos = pos; 1194 } 1195 return ret; 1196 } 1197 1198 /* 1199 * Wrap generic_file_aio_read with checks for cap bits on the inode. 1200 * Atomically grab references, so that those bits are not released 1201 * back to the MDS mid-read. 1202 * 1203 * Hmm, the sync read case isn't actually async... should it be? 1204 */ 1205 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) 1206 { 1207 struct file *filp = iocb->ki_filp; 1208 struct ceph_file_info *fi = filp->private_data; 1209 size_t len = iov_iter_count(to); 1210 struct inode *inode = file_inode(filp); 1211 struct ceph_inode_info *ci = ceph_inode(inode); 1212 struct page *pinned_page = NULL; 1213 ssize_t ret; 1214 int want, got = 0; 1215 int retry_op = 0, read = 0; 1216 1217 again: 1218 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 1219 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); 1220 1221 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1222 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1223 else 1224 want = CEPH_CAP_FILE_CACHE; 1225 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); 1226 if (ret < 0) 1227 return ret; 1228 1229 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 || 1230 (iocb->ki_flags & IOCB_DIRECT) || 1231 (fi->flags & CEPH_F_SYNC)) { 1232 1233 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1234 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1235 ceph_cap_string(got)); 1236 1237 if (ci->i_inline_version == CEPH_INLINE_NONE) { 1238 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) { 1239 ret = ceph_direct_read_write(iocb, to, 1240 NULL, NULL); 1241 if (ret >= 0 && ret < len) 1242 retry_op = CHECK_EOF; 1243 } else { 1244 ret = ceph_sync_read(iocb, to, &retry_op); 1245 } 1246 } else { 1247 retry_op = READ_INLINE; 1248 } 1249 } else { 1250 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got); 1251 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", 1252 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 1253 ceph_cap_string(got)); 1254 ceph_add_rw_context(fi, &rw_ctx); 1255 ret = generic_file_read_iter(iocb, to); 1256 ceph_del_rw_context(fi, &rw_ctx); 1257 } 1258 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 1259 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 1260 if (pinned_page) { 1261 put_page(pinned_page); 1262 pinned_page = NULL; 1263 } 1264 ceph_put_cap_refs(ci, got); 1265 if (retry_op > HAVE_RETRIED && ret >= 0) { 1266 int statret; 1267 struct page *page = NULL; 1268 loff_t i_size; 1269 if (retry_op == READ_INLINE) { 1270 page = __page_cache_alloc(GFP_KERNEL); 1271 if (!page) 1272 return -ENOMEM; 1273 } 1274 1275 statret = __ceph_do_getattr(inode, page, 1276 CEPH_STAT_CAP_INLINE_DATA, !!page); 1277 if (statret < 0) { 1278 if (page) 1279 __free_page(page); 1280 if (statret == -ENODATA) { 1281 BUG_ON(retry_op != READ_INLINE); 1282 goto again; 1283 } 1284 return statret; 1285 } 1286 1287 i_size = i_size_read(inode); 1288 if (retry_op == READ_INLINE) { 1289 BUG_ON(ret > 0 || read > 0); 1290 if (iocb->ki_pos < i_size && 1291 iocb->ki_pos < PAGE_SIZE) { 1292 loff_t end = min_t(loff_t, i_size, 1293 iocb->ki_pos + len); 1294 end = min_t(loff_t, end, PAGE_SIZE); 1295 if (statret < end) 1296 zero_user_segment(page, statret, end); 1297 ret = copy_page_to_iter(page, 1298 iocb->ki_pos & ~PAGE_MASK, 1299 end - iocb->ki_pos, to); 1300 iocb->ki_pos += ret; 1301 read += ret; 1302 } 1303 if (iocb->ki_pos < i_size && read < len) { 1304 size_t zlen = min_t(size_t, len - read, 1305 i_size - iocb->ki_pos); 1306 ret = iov_iter_zero(zlen, to); 1307 iocb->ki_pos += ret; 1308 read += ret; 1309 } 1310 __free_pages(page, 0); 1311 return read; 1312 } 1313 1314 /* hit EOF or hole? */ 1315 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && 1316 ret < len) { 1317 dout("sync_read hit hole, ppos %lld < size %lld" 1318 ", reading more\n", iocb->ki_pos, i_size); 1319 1320 read += ret; 1321 len -= ret; 1322 retry_op = HAVE_RETRIED; 1323 goto again; 1324 } 1325 } 1326 1327 if (ret >= 0) 1328 ret += read; 1329 1330 return ret; 1331 } 1332 1333 /* 1334 * Take cap references to avoid releasing caps to MDS mid-write. 1335 * 1336 * If we are synchronous, and write with an old snap context, the OSD 1337 * may return EOLDSNAPC. In that case, retry the write.. _after_ 1338 * dropping our cap refs and allowing the pending snap to logically 1339 * complete _before_ this write occurs. 1340 * 1341 * If we are near ENOSPC, write synchronously. 1342 */ 1343 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) 1344 { 1345 struct file *file = iocb->ki_filp; 1346 struct ceph_file_info *fi = file->private_data; 1347 struct inode *inode = file_inode(file); 1348 struct ceph_inode_info *ci = ceph_inode(inode); 1349 struct ceph_osd_client *osdc = 1350 &ceph_sb_to_client(inode->i_sb)->client->osdc; 1351 struct ceph_cap_flush *prealloc_cf; 1352 ssize_t count, written = 0; 1353 int err, want, got; 1354 loff_t pos; 1355 1356 if (ceph_snap(inode) != CEPH_NOSNAP) 1357 return -EROFS; 1358 1359 prealloc_cf = ceph_alloc_cap_flush(); 1360 if (!prealloc_cf) 1361 return -ENOMEM; 1362 1363 retry_snap: 1364 inode_lock(inode); 1365 1366 /* We can write back this queue in page reclaim */ 1367 current->backing_dev_info = inode_to_bdi(inode); 1368 1369 if (iocb->ki_flags & IOCB_APPEND) { 1370 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1371 if (err < 0) 1372 goto out; 1373 } 1374 1375 err = generic_write_checks(iocb, from); 1376 if (err <= 0) 1377 goto out; 1378 1379 pos = iocb->ki_pos; 1380 count = iov_iter_count(from); 1381 if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) { 1382 err = -EDQUOT; 1383 goto out; 1384 } 1385 1386 err = file_remove_privs(file); 1387 if (err) 1388 goto out; 1389 1390 err = file_update_time(file); 1391 if (err) 1392 goto out; 1393 1394 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1395 err = ceph_uninline_data(file, NULL); 1396 if (err < 0) 1397 goto out; 1398 } 1399 1400 /* FIXME: not complete since it doesn't account for being at quota */ 1401 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) { 1402 err = -ENOSPC; 1403 goto out; 1404 } 1405 1406 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 1407 inode, ceph_vinop(inode), pos, count, i_size_read(inode)); 1408 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1409 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1410 else 1411 want = CEPH_CAP_FILE_BUFFER; 1412 got = 0; 1413 err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count, 1414 &got, NULL); 1415 if (err < 0) 1416 goto out; 1417 1418 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 1419 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 1420 1421 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 1422 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) || 1423 (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) { 1424 struct ceph_snap_context *snapc; 1425 struct iov_iter data; 1426 inode_unlock(inode); 1427 1428 spin_lock(&ci->i_ceph_lock); 1429 if (__ceph_have_pending_cap_snap(ci)) { 1430 struct ceph_cap_snap *capsnap = 1431 list_last_entry(&ci->i_cap_snaps, 1432 struct ceph_cap_snap, 1433 ci_item); 1434 snapc = ceph_get_snap_context(capsnap->context); 1435 } else { 1436 BUG_ON(!ci->i_head_snapc); 1437 snapc = ceph_get_snap_context(ci->i_head_snapc); 1438 } 1439 spin_unlock(&ci->i_ceph_lock); 1440 1441 /* we might need to revert back to that point */ 1442 data = *from; 1443 if (iocb->ki_flags & IOCB_DIRECT) 1444 written = ceph_direct_read_write(iocb, &data, snapc, 1445 &prealloc_cf); 1446 else 1447 written = ceph_sync_write(iocb, &data, pos, snapc); 1448 if (written > 0) 1449 iov_iter_advance(from, written); 1450 ceph_put_snap_context(snapc); 1451 } else { 1452 /* 1453 * No need to acquire the i_truncate_mutex. Because 1454 * the MDS revokes Fwb caps before sending truncate 1455 * message to us. We can't get Fwb cap while there 1456 * are pending vmtruncate. So write and vmtruncate 1457 * can not run at the same time 1458 */ 1459 written = generic_perform_write(file, from, pos); 1460 if (likely(written >= 0)) 1461 iocb->ki_pos = pos + written; 1462 inode_unlock(inode); 1463 } 1464 1465 if (written >= 0) { 1466 int dirty; 1467 1468 spin_lock(&ci->i_ceph_lock); 1469 ci->i_inline_version = CEPH_INLINE_NONE; 1470 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1471 &prealloc_cf); 1472 spin_unlock(&ci->i_ceph_lock); 1473 if (dirty) 1474 __mark_inode_dirty(inode, dirty); 1475 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos)) 1476 ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL); 1477 } 1478 1479 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 1480 inode, ceph_vinop(inode), pos, (unsigned)count, 1481 ceph_cap_string(got)); 1482 ceph_put_cap_refs(ci, got); 1483 1484 if (written == -EOLDSNAPC) { 1485 dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n", 1486 inode, ceph_vinop(inode), pos, (unsigned)count); 1487 goto retry_snap; 1488 } 1489 1490 if (written >= 0) { 1491 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL)) 1492 iocb->ki_flags |= IOCB_DSYNC; 1493 written = generic_write_sync(iocb, written); 1494 } 1495 1496 goto out_unlocked; 1497 1498 out: 1499 inode_unlock(inode); 1500 out_unlocked: 1501 ceph_free_cap_flush(prealloc_cf); 1502 current->backing_dev_info = NULL; 1503 return written ? written : err; 1504 } 1505 1506 /* 1507 * llseek. be sure to verify file size on SEEK_END. 1508 */ 1509 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) 1510 { 1511 struct inode *inode = file->f_mapping->host; 1512 loff_t i_size; 1513 loff_t ret; 1514 1515 inode_lock(inode); 1516 1517 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { 1518 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); 1519 if (ret < 0) 1520 goto out; 1521 } 1522 1523 i_size = i_size_read(inode); 1524 switch (whence) { 1525 case SEEK_END: 1526 offset += i_size; 1527 break; 1528 case SEEK_CUR: 1529 /* 1530 * Here we special-case the lseek(fd, 0, SEEK_CUR) 1531 * position-querying operation. Avoid rewriting the "same" 1532 * f_pos value back to the file because a concurrent read(), 1533 * write() or lseek() might have altered it 1534 */ 1535 if (offset == 0) { 1536 ret = file->f_pos; 1537 goto out; 1538 } 1539 offset += file->f_pos; 1540 break; 1541 case SEEK_DATA: 1542 if (offset < 0 || offset >= i_size) { 1543 ret = -ENXIO; 1544 goto out; 1545 } 1546 break; 1547 case SEEK_HOLE: 1548 if (offset < 0 || offset >= i_size) { 1549 ret = -ENXIO; 1550 goto out; 1551 } 1552 offset = i_size; 1553 break; 1554 } 1555 1556 ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 1557 1558 out: 1559 inode_unlock(inode); 1560 return ret; 1561 } 1562 1563 static inline void ceph_zero_partial_page( 1564 struct inode *inode, loff_t offset, unsigned size) 1565 { 1566 struct page *page; 1567 pgoff_t index = offset >> PAGE_SHIFT; 1568 1569 page = find_lock_page(inode->i_mapping, index); 1570 if (page) { 1571 wait_on_page_writeback(page); 1572 zero_user(page, offset & (PAGE_SIZE - 1), size); 1573 unlock_page(page); 1574 put_page(page); 1575 } 1576 } 1577 1578 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, 1579 loff_t length) 1580 { 1581 loff_t nearly = round_up(offset, PAGE_SIZE); 1582 if (offset < nearly) { 1583 loff_t size = nearly - offset; 1584 if (length < size) 1585 size = length; 1586 ceph_zero_partial_page(inode, offset, size); 1587 offset += size; 1588 length -= size; 1589 } 1590 if (length >= PAGE_SIZE) { 1591 loff_t size = round_down(length, PAGE_SIZE); 1592 truncate_pagecache_range(inode, offset, offset + size - 1); 1593 offset += size; 1594 length -= size; 1595 } 1596 if (length) 1597 ceph_zero_partial_page(inode, offset, length); 1598 } 1599 1600 static int ceph_zero_partial_object(struct inode *inode, 1601 loff_t offset, loff_t *length) 1602 { 1603 struct ceph_inode_info *ci = ceph_inode(inode); 1604 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1605 struct ceph_osd_request *req; 1606 int ret = 0; 1607 loff_t zero = 0; 1608 int op; 1609 1610 if (!length) { 1611 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; 1612 length = &zero; 1613 } else { 1614 op = CEPH_OSD_OP_ZERO; 1615 } 1616 1617 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1618 ceph_vino(inode), 1619 offset, length, 1620 0, 1, op, 1621 CEPH_OSD_FLAG_WRITE, 1622 NULL, 0, 0, false); 1623 if (IS_ERR(req)) { 1624 ret = PTR_ERR(req); 1625 goto out; 1626 } 1627 1628 req->r_mtime = inode->i_mtime; 1629 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1630 if (!ret) { 1631 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 1632 if (ret == -ENOENT) 1633 ret = 0; 1634 } 1635 ceph_osdc_put_request(req); 1636 1637 out: 1638 return ret; 1639 } 1640 1641 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) 1642 { 1643 int ret = 0; 1644 struct ceph_inode_info *ci = ceph_inode(inode); 1645 s32 stripe_unit = ci->i_layout.stripe_unit; 1646 s32 stripe_count = ci->i_layout.stripe_count; 1647 s32 object_size = ci->i_layout.object_size; 1648 u64 object_set_size = object_size * stripe_count; 1649 u64 nearly, t; 1650 1651 /* round offset up to next period boundary */ 1652 nearly = offset + object_set_size - 1; 1653 t = nearly; 1654 nearly -= do_div(t, object_set_size); 1655 1656 while (length && offset < nearly) { 1657 loff_t size = length; 1658 ret = ceph_zero_partial_object(inode, offset, &size); 1659 if (ret < 0) 1660 return ret; 1661 offset += size; 1662 length -= size; 1663 } 1664 while (length >= object_set_size) { 1665 int i; 1666 loff_t pos = offset; 1667 for (i = 0; i < stripe_count; ++i) { 1668 ret = ceph_zero_partial_object(inode, pos, NULL); 1669 if (ret < 0) 1670 return ret; 1671 pos += stripe_unit; 1672 } 1673 offset += object_set_size; 1674 length -= object_set_size; 1675 } 1676 while (length) { 1677 loff_t size = length; 1678 ret = ceph_zero_partial_object(inode, offset, &size); 1679 if (ret < 0) 1680 return ret; 1681 offset += size; 1682 length -= size; 1683 } 1684 return ret; 1685 } 1686 1687 static long ceph_fallocate(struct file *file, int mode, 1688 loff_t offset, loff_t length) 1689 { 1690 struct ceph_file_info *fi = file->private_data; 1691 struct inode *inode = file_inode(file); 1692 struct ceph_inode_info *ci = ceph_inode(inode); 1693 struct ceph_osd_client *osdc = 1694 &ceph_inode_to_client(inode)->client->osdc; 1695 struct ceph_cap_flush *prealloc_cf; 1696 int want, got = 0; 1697 int dirty; 1698 int ret = 0; 1699 loff_t endoff = 0; 1700 loff_t size; 1701 1702 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 1703 return -EOPNOTSUPP; 1704 1705 if (!S_ISREG(inode->i_mode)) 1706 return -EOPNOTSUPP; 1707 1708 prealloc_cf = ceph_alloc_cap_flush(); 1709 if (!prealloc_cf) 1710 return -ENOMEM; 1711 1712 inode_lock(inode); 1713 1714 if (ceph_snap(inode) != CEPH_NOSNAP) { 1715 ret = -EROFS; 1716 goto unlock; 1717 } 1718 1719 if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) && 1720 ceph_quota_is_max_bytes_exceeded(inode, offset + length)) { 1721 ret = -EDQUOT; 1722 goto unlock; 1723 } 1724 1725 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && 1726 !(mode & FALLOC_FL_PUNCH_HOLE)) { 1727 ret = -ENOSPC; 1728 goto unlock; 1729 } 1730 1731 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1732 ret = ceph_uninline_data(file, NULL); 1733 if (ret < 0) 1734 goto unlock; 1735 } 1736 1737 size = i_size_read(inode); 1738 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 1739 endoff = offset + length; 1740 ret = inode_newsize_ok(inode, endoff); 1741 if (ret) 1742 goto unlock; 1743 } 1744 1745 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1746 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1747 else 1748 want = CEPH_CAP_FILE_BUFFER; 1749 1750 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL); 1751 if (ret < 0) 1752 goto unlock; 1753 1754 if (mode & FALLOC_FL_PUNCH_HOLE) { 1755 if (offset < size) 1756 ceph_zero_pagecache_range(inode, offset, length); 1757 ret = ceph_zero_objects(inode, offset, length); 1758 } else if (endoff > size) { 1759 truncate_pagecache_range(inode, size, -1); 1760 if (ceph_inode_set_size(inode, endoff)) 1761 ceph_check_caps(ceph_inode(inode), 1762 CHECK_CAPS_AUTHONLY, NULL); 1763 } 1764 1765 if (!ret) { 1766 spin_lock(&ci->i_ceph_lock); 1767 ci->i_inline_version = CEPH_INLINE_NONE; 1768 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1769 &prealloc_cf); 1770 spin_unlock(&ci->i_ceph_lock); 1771 if (dirty) 1772 __mark_inode_dirty(inode, dirty); 1773 if ((endoff > size) && 1774 ceph_quota_is_max_bytes_approaching(inode, endoff)) 1775 ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL); 1776 } 1777 1778 ceph_put_cap_refs(ci, got); 1779 unlock: 1780 inode_unlock(inode); 1781 ceph_free_cap_flush(prealloc_cf); 1782 return ret; 1783 } 1784 1785 const struct file_operations ceph_file_fops = { 1786 .open = ceph_open, 1787 .release = ceph_release, 1788 .llseek = ceph_llseek, 1789 .read_iter = ceph_read_iter, 1790 .write_iter = ceph_write_iter, 1791 .mmap = ceph_mmap, 1792 .fsync = ceph_fsync, 1793 .lock = ceph_lock, 1794 .flock = ceph_flock, 1795 .splice_read = generic_file_splice_read, 1796 .splice_write = iter_file_splice_write, 1797 .unlocked_ioctl = ceph_ioctl, 1798 .compat_ioctl = ceph_ioctl, 1799 .fallocate = ceph_fallocate, 1800 }; 1801 1802