#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/osd_client.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page. This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode. In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress. In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER. So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_. Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and writing out the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
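
/*
 * For example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12), a
 * congestion_kb of 16384 gives an "on" threshold of 16384 >> 2 == 4096
 * dirty pages in flight, and an "off" threshold of 4096 - 1024 == 3072.
 */
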
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page. Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate. If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/*
	 * Note that we're grabbing a snapc ref here without holding
	 * any locks!
	 */
	snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_head_snapc == NULL)
		ci->i_head_snapc = ceph_get_snap_context(snapc);
	++ci->i_wrbuffer_ref_head;
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private. Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}
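
/*
 * The snap context reference stored in page->private above is dropped
 * in exactly one place per page: ceph_invalidatepage() when the page is
 * truncated away, writepage_nounlock() after a synchronous writeback,
 * or writepages_finish() when an async writeback request completes.
 */
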
/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately. Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_CACHE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	if (!PagePrivate(page))
		return;

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	ClearPageChecked(page);

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}

static int ceph_releasepage(struct page *page, gfp_t g)
{
	struct inode *inode = page->mapping ? page->mapping->host : NULL;
	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}

/*
 * read a single page, without unlocking it.
 */
static int readpage_nounlock(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 len = PAGE_CACHE_SIZE;

	err = ceph_readpage_from_fscache(inode, page);

	if (err == 0)
		goto out;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  (u64) page_offset(page), &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		ceph_fscache_readpage_cancel(inode, page);
		goto out;
	} else {
		if (err < PAGE_CACHE_SIZE) {
			/* zero fill remainder of page */
			zero_user_segment(page, err, PAGE_CACHE_SIZE);
		} else {
			flush_dcache_page(page);
		}
	}
	SetPageUptodate(page);

	if (err >= 0)
		ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = readpage_nounlock(filp, page);
	unlock_page(page);
	return r;
}

/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_data *osd_data;
	int rc = req->r_result;
	int bytes = le32_to_cpu(msg->hdr.data_len);
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0)
			goto unlock;
		if (bytes < (int)PAGE_CACHE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_CACHE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		page_cache_release(page);
		bytes -= PAGE_CACHE_SIZE;
	}
	kfree(osd_data->pages);
}
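
/*
 * Note that finish_read() trusts msg->hdr.data_len, not r_result, for
 * how many bytes actually arrived; everything past that point is
 * zero-filled before the pages are flagged uptodate and unlocked.
 */
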
static void ceph_unlock_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		unlock_page(pages[i]);
}

/*
 * start an async read(ahead) operation. return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int ret;

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_CACHE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
	ret = -ENOMEM;
	if (!pages)
		goto out;
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_NOFS)) {
			ceph_fscache_uncache_page(inode, page);
			page_cache_release(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);
	return nr_pages;

out_pages:
	ceph_unlock_page_vector(pages, nr_pages);
	ceph_release_page_vector(pages, nr_pages);
out:
	ceph_osdc_put_request(req);
	return ret;
}
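
/*
 * start_read() above consumes pages from the tail of page_list (the
 * lowest index, as handed to us by the VM) and stops at the first gap
 * in the index sequence, so ceph_readpages() below may need several
 * calls to drain the list.
 */
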
/*
 * Read multiple pages. Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int rc = 0;
	int max = 0;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);

	if (rc == 0)
		goto out;

	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
			>> PAGE_SHIFT;

	dout("readpages %p file %p nr_pages %d max %d\n", inode,
	     file, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, page_list, max);
		if (rc < 0)
			goto out;
		BUG_ON(rc == 0);
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    u64 *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
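
/*
 * Both writeback paths below (writepage_nounlock() for single pages and
 * ceph_writepages_start() for batches) use get_oldest_context() to make
 * sure they only push out pages belonging to the oldest snap context
 * with dirty data, per the snap-ordering rule described at the top of
 * this file.
 */
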
/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_osd_client *osdc;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	long writeback_stat;
	u64 truncate_size, snap_size = 0;
	u32 truncate_seq;
	int err = 0, len = PAGE_CACHE_SIZE;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);
	osdc = &fsc->client->osdc;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
		goto out;
	}
	ceph_put_snap_context(oldest);

	spin_lock(&ci->i_ceph_lock);
	truncate_seq = ci->i_truncate_seq;
	truncate_size = ci->i_truncate_size;
	if (!snap_size)
		snap_size = i_size_read(inode);
	spin_unlock(&ci->i_ceph_lock);

	/* is this a partial page at end of file? */
	if (page_off >= snap_size) {
		dout("%p page eof %llu\n", page, snap_size);
		goto out;
	}
	if (snap_size < page_off + len)
		len = snap_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
	     inode, page, page->index, page_off, len, snapc);

	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
	if (writeback_stat >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);

	ceph_readpage_to_fscache(inode, page);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   truncate_seq, truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		dout("writepage setting page/mapping error %d %p\n", err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		if (wbc)
			wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */
out:
	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * lame release_pages helper. release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	unsigned wrote;
	struct page *page;
	int num_pages;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	int rc = req->r_result;
	u64 bytes = req->r_ops[0].extent.length;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	long writeback_stat;
	unsigned issued = ceph_caps_issued(ci);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	if (rc >= 0) {
		/*
		 * Assume we wrote the pages we originally sent. The
		 * osd might reply with fewer pages if our writeback
		 * raced with a truncation and was adjusted at the osd,
		 * so don't believe the reply.
		 */
		wrote = num_pages;
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < num_pages; i++) {
		page = osd_data->pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

		writeback_stat =
			atomic_long_dec_return(&fsc->writeback_count);
		if (writeback_stat <
		    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
			clear_bdi_congested(&fsc->backing_dev_info,
					    BLK_RW_ASYNC);

		ceph_put_snap_context(page_snap_context(page));
		page->private = 0;
		ClearPagePrivate(page);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);

		/*
		 * We lost the cache cap, need to truncate the page before
		 * it is unlocked, otherwise we'd truncate it later in the
		 * page truncation thread, possibly losing some data that
		 * raced its way in
		 */
		if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
			generic_error_remove_page(inode->i_mapping, page);

		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc);

	ceph_release_pages(osd_data->pages, num_pages);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}
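
/*
 * Writeback congestion accounting: fsc->writeback_count is bumped for
 * every page queued for writeback (in writepage_nounlock() and in the
 * gathering loop of ceph_writepages_start() below) and dropped for
 * every page completed in writepages_finish() above; the bdi is marked
 * congested above CONGESTION_ON_THRESH and cleared again once we fall
 * below CONGESTION_OFF_THRESH.
 */
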
/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync = 0;
	u64 truncate_size, snap_size;
	u32 truncate_seq;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	if ((wbc->sync_mode == WB_SYNC_ALL) ||
	    ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
		pr_warning("writepage_start %p on forced umount\n", inode);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snap_size = 0;
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	if (snap_size == 0)
		snap_size = i_size_read(inode);
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	spin_lock(&ci->i_ceph_lock);
	truncate_seq = ci->i_truncate_seq;
	truncate_size = ci->i_truncate_size;
	if (!snap_size)
		snap_size = i_size_read(inode);
	spin_unlock(&ci->i_ceph_lock);

	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout(" snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;
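
	/*
	 * Each pass of the loop below gathers a run of contiguous dirty
	 * pages that belong to this snapc (at most wsize bytes worth),
	 * attaches them to a single OSD write request, and submits it;
	 * a page that breaks the run (wrong snapc, non-consecutive index,
	 * already under writeback, past eof) ends the current batch.
	 */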
	while (!done && index <= end) {
		int num_ops = do_sync ? 2 : 1;
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page **pages = NULL;
		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
		struct page *page;
		int want;
		u64 offset, len;
		long writeback_stat;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if (page_offset(page) >= snap_size) {
				dout("%p page eof %llu\n", page, snap_size);
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc->seq > snapc->seq) {
				dout("page snapc %p %lld > oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/*
			 * We have something to write. If this is
			 * the first locked page this time through,
			 * allocate an osd request and a page array
			 * that it will use.
			 */
			if (locked_pages == 0) {
				BUG_ON(pages);
				/* prepare async write request */
				offset = (u64)page_offset(page);
				len = wsize;
				req = ceph_osdc_new_request(&fsc->client->osdc,
							&ci->i_layout, vino,
							offset, &len, num_ops,
							CEPH_OSD_OP_WRITE,
							CEPH_OSD_FLAG_WRITE |
							CEPH_OSD_FLAG_ONDISK,
							snapc, truncate_seq,
							truncate_size, true);
				if (IS_ERR(req)) {
					rc = PTR_ERR(req);
					unlock_page(page);
					break;
				}

				req->r_callback = writepages_finish;
				req->r_inode = inode;

				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc(max_pages * sizeof (*pages),
						GFP_NOFS);
				if (!pages) {
					pool = fsc->wb_pagevec_pool;
					pages = mempool_alloc(pool, GFP_NOFS);
					BUG_ON(!pages);
				}
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			writeback_stat =
				atomic_long_inc_return(&fsc->writeback_count);
			if (writeback_stat > CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(&fsc->backing_dev_info,
						  BLK_RW_ASYNC);
			}

			set_page_writeback(page);
			pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec... we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* Format the osd request message and submit the write */

		offset = page_offset(pages[0]);
		len = min(snap_size - offset,
			  (u64)locked_pages << PAGE_CACHE_SHIFT);
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 !!pool, false);

		pages = NULL;	/* request message now owns the pages array */
		pool = NULL;

		/* Update the write op length in case we changed it */

		osd_req_op_extent_update(req, 0, len);

		vino = ceph_vino(inode);
		ceph_osdc_build_request(req, offset, snapc, vino.snap,
					&inode->i_mtime);

		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}


/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t page_off = pos & PAGE_CACHE_MASK;
	int pos_in_page = pos & ~PAGE_CACHE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	/* check snap context */
	BUG_ON(!ci->i_snap_realm);
	down_read(&mdsc->snap_rwsem);
	BUG_ON(!ci->i_snap_realm->cached_context);
	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context! is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL);
		up_read(&mdsc->snap_rwsem);

		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_interruptible(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_nosnap;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
		return 0;

	/* past end of file? */
	i_size = inode->i_size;   /* caller holds i_mutex */

	if (i_size + len > inode->i_sb->s_maxbytes) {
		/* file is too big */
		r = -EINVAL;
		goto fail;
	}

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_CACHE_SIZE);
		return 0;
	}

	/* we need to read it. */
	up_read(&mdsc->snap_rwsem);
	r = readpage_nounlock(file, page);
	if (r < 0)
		goto fail_nosnap;
	goto retry_locked;

fail:
	up_read(&mdsc->snap_rwsem);
fail_nosnap:
	unlock_page(page);
	return r;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;
		*pagep = page;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
	} while (r == -EAGAIN);

	return r;
}
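
/*
 * Note the locking contract: ceph_update_writeable_page() returns 0
 * with mdsc->snap_rwsem held for read (and the page still locked); it
 * is released again in ceph_write_end() and ceph_page_mkwrite() once
 * the page has been dirtied.
 */
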
/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting and drop read lock on
 * mdsc->snap_rwsem.
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, len);

	/* did file size increase? */
	/* (no need for i_size_read(); the caller holds i_mutex) */
	if (pos+copied > inode->i_size)
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	up_read(&mdsc->snap_rwsem);
	page_cache_release(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
			      const struct iovec *iov,
			      loff_t pos, unsigned long nr_segs)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};


/*
 * vm ops
 */
static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
	int want, got, ret;

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	while (1) {
		got = 0;
		ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
		if (ret == 0)
			break;
		if (ret != -ERESTARTSYS) {
			WARN_ON(1);
			return VM_FAULT_SIGBUS;
		}
	}
	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));

	ret = filemap_fault(vma, vmf);

	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);

	return ret;
}
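
/*
 * Both fault handlers loop until the MDS grants the caps they need
 * (FILE_RD/FILE_CACHE for a read fault above, FILE_WR/FILE_BUFFER for
 * page_mkwrite below); only -ERESTARTSYS retries the cap request, any
 * other error is unexpected and faults with SIGBUS.
 */
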
/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, ret;

	if (off + PAGE_CACHE_SIZE <= size)
		len = PAGE_CACHE_SIZE;
	else
		len = size & ~PAGE_CACHE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	while (1) {
		got = 0;
		ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, off + len);
		if (ret == 0)
			break;
		if (ret != -ERESTARTSYS) {
			WARN_ON(1);
			return VM_FAULT_SIGBUS;
		}
	}
	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);

	lock_page(page);

	ret = VM_FAULT_NOPAGE;
	if ((off > size) ||
	    (page->mapping != inode->i_mapping))
		goto out;

	ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
	if (ret == 0) {
		/* success. we'll keep the page locked. */
		set_page_dirty(page);
		up_read(&mdsc->snap_rwsem);
		ret = VM_FAULT_LOCKED;
	} else {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else
			ret = VM_FAULT_SIGBUS;
	}
out:
	if (ret != VM_FAULT_LOCKED) {
		unlock_page(page);
	} else {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);

	return ret;
}

static struct vm_operations_struct ceph_vmops = {
	.fault = ceph_filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}