1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/backing-dev.h> 4 #include <linux/fs.h> 5 #include <linux/mm.h> 6 #include <linux/pagemap.h> 7 #include <linux/writeback.h> /* generic_writepages */ 8 #include <linux/slab.h> 9 #include <linux/pagevec.h> 10 #include <linux/task_io_accounting_ops.h> 11 12 #include "super.h" 13 #include "mds_client.h" 14 #include <linux/ceph/osd_client.h> 15 16 /* 17 * Ceph address space ops. 18 * 19 * There are a few funny things going on here. 20 * 21 * The page->private field is used to reference a struct 22 * ceph_snap_context for _every_ dirty page. This indicates which 23 * snapshot the page was logically dirtied in, and thus which snap 24 * context needs to be associated with the osd write during writeback. 25 * 26 * Similarly, struct ceph_inode_info maintains a set of counters to 27 * count dirty pages on the inode. In the absence of snapshots, 28 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. 29 * 30 * When a snapshot is taken (that is, when the client receives 31 * notification that a snapshot was taken), each inode with caps and 32 * with dirty pages (dirty pages implies there is a cap) gets a new 33 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending 34 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is 35 * moved to capsnap->dirty. (Unless a sync write is currently in 36 * progress. In that case, the capsnap is said to be "pending", new 37 * writes cannot start, and the capsnap isn't "finalized" until the 38 * write completes (or fails) and a final size/mtime for the inode for 39 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. 40 * 41 * On writeback, we must submit writes to the osd IN SNAP ORDER. So, 42 * we look for the first capsnap in i_cap_snaps and write out pages in 43 * that snap context _only_. Then we move on to the next capsnap, 44 * eventually reaching the "live" or "head" context (i.e., pages that 45 * are not yet snapped) and are writing the most recently dirtied 46 * pages. 47 * 48 * Invalidate and so forth must take care to ensure the dirty page 49 * accounting is preserved. 50 */ 51 52 #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) 53 #define CONGESTION_OFF_THRESH(congestion_kb) \ 54 (CONGESTION_ON_THRESH(congestion_kb) - \ 55 (CONGESTION_ON_THRESH(congestion_kb) >> 2)) 56 57 static inline struct ceph_snap_context *page_snap_context(struct page *page) 58 { 59 if (PagePrivate(page)) 60 return (void *)page->private; 61 return NULL; 62 } 63 64 /* 65 * Dirty a page. Optimistically adjust accounting, on the assumption 66 * that we won't race with invalidate. If we do, readjust. 67 */ 68 static int ceph_set_page_dirty(struct page *page) 69 { 70 struct address_space *mapping = page->mapping; 71 struct inode *inode; 72 struct ceph_inode_info *ci; 73 int undo = 0; 74 struct ceph_snap_context *snapc; 75 76 if (unlikely(!mapping)) 77 return !TestSetPageDirty(page); 78 79 if (TestSetPageDirty(page)) { 80 dout("%p set_page_dirty %p idx %lu -- already dirty\n", 81 mapping->host, page, page->index); 82 return 0; 83 } 84 85 inode = mapping->host; 86 ci = ceph_inode(inode); 87 88 /* 89 * Note that we're grabbing a snapc ref here without holding 90 * any locks! 91 */ 92 snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); 93 94 /* dirty the head */ 95 spin_lock(&ci->i_ceph_lock); 96 if (ci->i_head_snapc == NULL) 97 ci->i_head_snapc = ceph_get_snap_context(snapc); 98 ++ci->i_wrbuffer_ref_head; 99 if (ci->i_wrbuffer_ref == 0) 100 ihold(inode); 101 ++ci->i_wrbuffer_ref; 102 dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " 103 "snapc %p seq %lld (%d snaps)\n", 104 mapping->host, page, page->index, 105 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 106 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 107 snapc, snapc->seq, snapc->num_snaps); 108 spin_unlock(&ci->i_ceph_lock); 109 110 /* now adjust page */ 111 spin_lock_irq(&mapping->tree_lock); 112 if (page->mapping) { /* Race with truncate? */ 113 WARN_ON_ONCE(!PageUptodate(page)); 114 account_page_dirtied(page, page->mapping); 115 radix_tree_tag_set(&mapping->page_tree, 116 page_index(page), PAGECACHE_TAG_DIRTY); 117 118 /* 119 * Reference snap context in page->private. Also set 120 * PagePrivate so that we get invalidatepage callback. 121 */ 122 page->private = (unsigned long)snapc; 123 SetPagePrivate(page); 124 } else { 125 dout("ANON set_page_dirty %p (raced truncate?)\n", page); 126 undo = 1; 127 } 128 129 spin_unlock_irq(&mapping->tree_lock); 130 131 if (undo) 132 /* whoops, we failed to dirty the page */ 133 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 134 135 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 136 137 BUG_ON(!PageDirty(page)); 138 return 1; 139 } 140 141 /* 142 * If we are truncating the full page (i.e. offset == 0), adjust the 143 * dirty page counters appropriately. Only called if there is private 144 * data on the page. 145 */ 146 static void ceph_invalidatepage(struct page *page, unsigned long offset) 147 { 148 struct inode *inode; 149 struct ceph_inode_info *ci; 150 struct ceph_snap_context *snapc = page_snap_context(page); 151 152 BUG_ON(!PageLocked(page)); 153 BUG_ON(!PagePrivate(page)); 154 BUG_ON(!page->mapping); 155 156 inode = page->mapping->host; 157 158 /* 159 * We can get non-dirty pages here due to races between 160 * set_page_dirty and truncate_complete_page; just spit out a 161 * warning, in case we end up with accounting problems later. 162 */ 163 if (!PageDirty(page)) 164 pr_err("%p invalidatepage %p page not dirty\n", inode, page); 165 166 if (offset == 0) 167 ClearPageChecked(page); 168 169 ci = ceph_inode(inode); 170 if (offset == 0) { 171 dout("%p invalidatepage %p idx %lu full dirty page %lu\n", 172 inode, page, page->index, offset); 173 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 174 ceph_put_snap_context(snapc); 175 page->private = 0; 176 ClearPagePrivate(page); 177 } else { 178 dout("%p invalidatepage %p idx %lu partial dirty page\n", 179 inode, page, page->index); 180 } 181 } 182 183 /* just a sanity check */ 184 static int ceph_releasepage(struct page *page, gfp_t g) 185 { 186 struct inode *inode = page->mapping ? page->mapping->host : NULL; 187 dout("%p releasepage %p idx %lu\n", inode, page, page->index); 188 WARN_ON(PageDirty(page)); 189 WARN_ON(PagePrivate(page)); 190 return 0; 191 } 192 193 /* 194 * read a single page, without unlocking it. 195 */ 196 static int readpage_nounlock(struct file *filp, struct page *page) 197 { 198 struct inode *inode = file_inode(filp); 199 struct ceph_inode_info *ci = ceph_inode(inode); 200 struct ceph_osd_client *osdc = 201 &ceph_inode_to_client(inode)->client->osdc; 202 int err = 0; 203 u64 len = PAGE_CACHE_SIZE; 204 205 dout("readpage inode %p file %p page %p index %lu\n", 206 inode, filp, page, page->index); 207 err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, 208 (u64) page_offset(page), &len, 209 ci->i_truncate_seq, ci->i_truncate_size, 210 &page, 1, 0); 211 if (err == -ENOENT) 212 err = 0; 213 if (err < 0) { 214 SetPageError(page); 215 goto out; 216 } else if (err < PAGE_CACHE_SIZE) { 217 /* zero fill remainder of page */ 218 zero_user_segment(page, err, PAGE_CACHE_SIZE); 219 } 220 SetPageUptodate(page); 221 222 out: 223 return err < 0 ? err : 0; 224 } 225 226 static int ceph_readpage(struct file *filp, struct page *page) 227 { 228 int r = readpage_nounlock(filp, page); 229 unlock_page(page); 230 return r; 231 } 232 233 /* 234 * Finish an async read(ahead) op. 235 */ 236 static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) 237 { 238 struct inode *inode = req->r_inode; 239 int rc = req->r_result; 240 int bytes = le32_to_cpu(msg->hdr.data_len); 241 int i; 242 243 dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); 244 245 /* unlock all pages, zeroing any data we didn't read */ 246 BUG_ON(req->r_data.type != CEPH_OSD_DATA_TYPE_PAGES); 247 for (i = 0; i < req->r_data.num_pages; i++, bytes -= PAGE_CACHE_SIZE) { 248 struct page *page = req->r_data.pages[i]; 249 250 if (bytes < (int)PAGE_CACHE_SIZE) { 251 /* zero (remainder of) page */ 252 int s = bytes < 0 ? 0 : bytes; 253 zero_user_segment(page, s, PAGE_CACHE_SIZE); 254 } 255 dout("finish_read %p uptodate %p idx %lu\n", inode, page, 256 page->index); 257 flush_dcache_page(page); 258 SetPageUptodate(page); 259 unlock_page(page); 260 page_cache_release(page); 261 } 262 kfree(req->r_data.pages); 263 } 264 265 static void ceph_unlock_page_vector(struct page **pages, int num_pages) 266 { 267 int i; 268 269 for (i = 0; i < num_pages; i++) 270 unlock_page(pages[i]); 271 } 272 273 /* 274 * start an async read(ahead) operation. return nr_pages we submitted 275 * a read for on success, or negative error code. 276 */ 277 static int start_read(struct inode *inode, struct list_head *page_list, int max) 278 { 279 struct ceph_osd_client *osdc = 280 &ceph_inode_to_client(inode)->client->osdc; 281 struct ceph_inode_info *ci = ceph_inode(inode); 282 struct page *page = list_entry(page_list->prev, struct page, lru); 283 struct ceph_osd_request *req; 284 u64 off; 285 u64 len; 286 int i; 287 struct page **pages; 288 pgoff_t next_index; 289 int nr_pages = 0; 290 int ret; 291 292 off = (u64) page_offset(page); 293 294 /* count pages */ 295 next_index = page->index; 296 list_for_each_entry_reverse(page, page_list, lru) { 297 if (page->index != next_index) 298 break; 299 nr_pages++; 300 next_index++; 301 if (max && nr_pages == max) 302 break; 303 } 304 len = nr_pages << PAGE_CACHE_SHIFT; 305 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, 306 off, len); 307 308 req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), 309 off, &len, 310 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 311 NULL, 0, 312 ci->i_truncate_seq, ci->i_truncate_size, 313 NULL, false); 314 if (IS_ERR(req)) 315 return PTR_ERR(req); 316 317 /* build page vector */ 318 nr_pages = calc_pages_for(0, len); 319 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); 320 ret = -ENOMEM; 321 if (!pages) 322 goto out; 323 for (i = 0; i < nr_pages; ++i) { 324 page = list_entry(page_list->prev, struct page, lru); 325 BUG_ON(PageLocked(page)); 326 list_del(&page->lru); 327 328 dout("start_read %p adding %p idx %lu\n", inode, page, 329 page->index); 330 if (add_to_page_cache_lru(page, &inode->i_data, page->index, 331 GFP_NOFS)) { 332 page_cache_release(page); 333 dout("start_read %p add_to_page_cache failed %p\n", 334 inode, page); 335 nr_pages = i; 336 goto out_pages; 337 } 338 pages[i] = page; 339 } 340 req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; 341 req->r_data.pages = pages; 342 req->r_data.num_pages = nr_pages; 343 req->r_data.alignment = 0; 344 req->r_callback = finish_read; 345 req->r_inode = inode; 346 347 dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); 348 ret = ceph_osdc_start_request(osdc, req, false); 349 if (ret < 0) 350 goto out_pages; 351 ceph_osdc_put_request(req); 352 return nr_pages; 353 354 out_pages: 355 ceph_unlock_page_vector(pages, nr_pages); 356 ceph_release_page_vector(pages, nr_pages); 357 out: 358 ceph_osdc_put_request(req); 359 return ret; 360 } 361 362 363 /* 364 * Read multiple pages. Leave pages we don't read + unlock in page_list; 365 * the caller (VM) cleans them up. 366 */ 367 static int ceph_readpages(struct file *file, struct address_space *mapping, 368 struct list_head *page_list, unsigned nr_pages) 369 { 370 struct inode *inode = file_inode(file); 371 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 372 int rc = 0; 373 int max = 0; 374 375 if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE) 376 max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) 377 >> PAGE_SHIFT; 378 379 dout("readpages %p file %p nr_pages %d max %d\n", inode, 380 file, nr_pages, 381 max); 382 while (!list_empty(page_list)) { 383 rc = start_read(inode, page_list, max); 384 if (rc < 0) 385 goto out; 386 BUG_ON(rc == 0); 387 } 388 out: 389 dout("readpages %p file %p ret %d\n", inode, file, rc); 390 return rc; 391 } 392 393 /* 394 * Get ref for the oldest snapc for an inode with dirty data... that is, the 395 * only snap context we are allowed to write back. 396 */ 397 static struct ceph_snap_context *get_oldest_context(struct inode *inode, 398 u64 *snap_size) 399 { 400 struct ceph_inode_info *ci = ceph_inode(inode); 401 struct ceph_snap_context *snapc = NULL; 402 struct ceph_cap_snap *capsnap = NULL; 403 404 spin_lock(&ci->i_ceph_lock); 405 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 406 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 407 capsnap->context, capsnap->dirty_pages); 408 if (capsnap->dirty_pages) { 409 snapc = ceph_get_snap_context(capsnap->context); 410 if (snap_size) 411 *snap_size = capsnap->size; 412 break; 413 } 414 } 415 if (!snapc && ci->i_wrbuffer_ref_head) { 416 snapc = ceph_get_snap_context(ci->i_head_snapc); 417 dout(" head snapc %p has %d dirty pages\n", 418 snapc, ci->i_wrbuffer_ref_head); 419 } 420 spin_unlock(&ci->i_ceph_lock); 421 return snapc; 422 } 423 424 /* 425 * Write a single page, but leave the page locked. 426 * 427 * If we get a write error, set the page error bit, but still adjust the 428 * dirty page accounting (i.e., page is no longer dirty). 429 */ 430 static int writepage_nounlock(struct page *page, struct writeback_control *wbc) 431 { 432 struct inode *inode; 433 struct ceph_inode_info *ci; 434 struct ceph_fs_client *fsc; 435 struct ceph_osd_client *osdc; 436 loff_t page_off = page_offset(page); 437 int len = PAGE_CACHE_SIZE; 438 loff_t i_size; 439 int err = 0; 440 struct ceph_snap_context *snapc, *oldest; 441 u64 snap_size = 0; 442 long writeback_stat; 443 444 dout("writepage %p idx %lu\n", page, page->index); 445 446 if (!page->mapping || !page->mapping->host) { 447 dout("writepage %p - no mapping\n", page); 448 return -EFAULT; 449 } 450 inode = page->mapping->host; 451 ci = ceph_inode(inode); 452 fsc = ceph_inode_to_client(inode); 453 osdc = &fsc->client->osdc; 454 455 /* verify this is a writeable snap context */ 456 snapc = page_snap_context(page); 457 if (snapc == NULL) { 458 dout("writepage %p page %p not dirty?\n", inode, page); 459 goto out; 460 } 461 oldest = get_oldest_context(inode, &snap_size); 462 if (snapc->seq > oldest->seq) { 463 dout("writepage %p page %p snapc %p not writeable - noop\n", 464 inode, page, snapc); 465 /* we should only noop if called by kswapd */ 466 WARN_ON((current->flags & PF_MEMALLOC) == 0); 467 ceph_put_snap_context(oldest); 468 goto out; 469 } 470 ceph_put_snap_context(oldest); 471 472 /* is this a partial page at end of file? */ 473 if (snap_size) 474 i_size = snap_size; 475 else 476 i_size = i_size_read(inode); 477 if (i_size < page_off + len) 478 len = i_size - page_off; 479 480 dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", 481 inode, page, page->index, page_off, len, snapc); 482 483 writeback_stat = atomic_long_inc_return(&fsc->writeback_count); 484 if (writeback_stat > 485 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) 486 set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); 487 488 set_page_writeback(page); 489 err = ceph_osdc_writepages(osdc, ceph_vino(inode), 490 &ci->i_layout, snapc, 491 page_off, len, 492 ci->i_truncate_seq, ci->i_truncate_size, 493 &inode->i_mtime, &page, 1); 494 if (err < 0) { 495 dout("writepage setting page/mapping error %d %p\n", err, page); 496 SetPageError(page); 497 mapping_set_error(&inode->i_data, err); 498 if (wbc) 499 wbc->pages_skipped++; 500 } else { 501 dout("writepage cleaned page %p\n", page); 502 err = 0; /* vfs expects us to return 0 */ 503 } 504 page->private = 0; 505 ClearPagePrivate(page); 506 end_page_writeback(page); 507 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 508 ceph_put_snap_context(snapc); /* page's reference */ 509 out: 510 return err; 511 } 512 513 static int ceph_writepage(struct page *page, struct writeback_control *wbc) 514 { 515 int err; 516 struct inode *inode = page->mapping->host; 517 BUG_ON(!inode); 518 ihold(inode); 519 err = writepage_nounlock(page, wbc); 520 unlock_page(page); 521 iput(inode); 522 return err; 523 } 524 525 526 /* 527 * lame release_pages helper. release_pages() isn't exported to 528 * modules. 529 */ 530 static void ceph_release_pages(struct page **pages, int num) 531 { 532 struct pagevec pvec; 533 int i; 534 535 pagevec_init(&pvec, 0); 536 for (i = 0; i < num; i++) { 537 if (pagevec_add(&pvec, pages[i]) == 0) 538 pagevec_release(&pvec); 539 } 540 pagevec_release(&pvec); 541 } 542 543 544 /* 545 * async writeback completion handler. 546 * 547 * If we get an error, set the mapping error bit, but not the individual 548 * page error bits. 549 */ 550 static void writepages_finish(struct ceph_osd_request *req, 551 struct ceph_msg *msg) 552 { 553 struct inode *inode = req->r_inode; 554 struct ceph_inode_info *ci = ceph_inode(inode); 555 unsigned wrote; 556 struct page *page; 557 int i; 558 struct ceph_snap_context *snapc = req->r_snapc; 559 struct address_space *mapping = inode->i_mapping; 560 int rc = req->r_result; 561 u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length); 562 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 563 long writeback_stat; 564 unsigned issued = ceph_caps_issued(ci); 565 566 BUG_ON(req->r_data.type != CEPH_OSD_DATA_TYPE_PAGES); 567 if (rc >= 0) { 568 /* 569 * Assume we wrote the pages we originally sent. The 570 * osd might reply with fewer pages if our writeback 571 * raced with a truncation and was adjusted at the osd, 572 * so don't believe the reply. 573 */ 574 wrote = req->r_data.num_pages; 575 } else { 576 wrote = 0; 577 mapping_set_error(mapping, rc); 578 } 579 dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n", 580 inode, rc, bytes, wrote); 581 582 /* clean all pages */ 583 for (i = 0; i < req->r_data.num_pages; i++) { 584 page = req->r_data.pages[i]; 585 BUG_ON(!page); 586 WARN_ON(!PageUptodate(page)); 587 588 writeback_stat = 589 atomic_long_dec_return(&fsc->writeback_count); 590 if (writeback_stat < 591 CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) 592 clear_bdi_congested(&fsc->backing_dev_info, 593 BLK_RW_ASYNC); 594 595 ceph_put_snap_context(page_snap_context(page)); 596 page->private = 0; 597 ClearPagePrivate(page); 598 dout("unlocking %d %p\n", i, page); 599 end_page_writeback(page); 600 601 /* 602 * We lost the cache cap, need to truncate the page before 603 * it is unlocked, otherwise we'd truncate it later in the 604 * page truncation thread, possibly losing some data that 605 * raced its way in 606 */ 607 if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) 608 generic_error_remove_page(inode->i_mapping, page); 609 610 unlock_page(page); 611 } 612 dout("%p wrote+cleaned %d pages\n", inode, wrote); 613 ceph_put_wrbuffer_cap_refs(ci, req->r_data.num_pages, snapc); 614 615 ceph_release_pages(req->r_data.pages, req->r_data.num_pages); 616 if (req->r_data.pages_from_pool) 617 mempool_free(req->r_data.pages, 618 ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); 619 else 620 kfree(req->r_data.pages); 621 ceph_osdc_put_request(req); 622 } 623 624 /* 625 * allocate a page vec, either directly, or if necessary, via a the 626 * mempool. we avoid the mempool if we can because req->r_data.num_pages 627 * may be less than the maximum write size. 628 */ 629 static void alloc_page_vec(struct ceph_fs_client *fsc, 630 struct ceph_osd_request *req) 631 { 632 req->r_data.pages = kmalloc(sizeof(struct page *) * req->r_data.num_pages, 633 GFP_NOFS); 634 if (!req->r_data.pages) { 635 req->r_data.pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS); 636 req->r_data.pages_from_pool = 1; 637 WARN_ON(!req->r_data.pages); 638 } 639 } 640 641 /* 642 * initiate async writeback 643 */ 644 static int ceph_writepages_start(struct address_space *mapping, 645 struct writeback_control *wbc) 646 { 647 struct inode *inode = mapping->host; 648 struct ceph_inode_info *ci = ceph_inode(inode); 649 struct ceph_fs_client *fsc; 650 pgoff_t index, start, end; 651 int range_whole = 0; 652 int should_loop = 1; 653 pgoff_t max_pages = 0, max_pages_ever = 0; 654 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; 655 struct pagevec pvec; 656 int done = 0; 657 int rc = 0; 658 unsigned wsize = 1 << inode->i_blkbits; 659 struct ceph_osd_request *req = NULL; 660 int do_sync; 661 u64 snap_size = 0; 662 663 /* 664 * Include a 'sync' in the OSD request if this is a data 665 * integrity write (e.g., O_SYNC write or fsync()), or if our 666 * cap is being revoked. 667 */ 668 do_sync = wbc->sync_mode == WB_SYNC_ALL; 669 if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER)) 670 do_sync = 1; 671 dout("writepages_start %p dosync=%d (mode=%s)\n", 672 inode, do_sync, 673 wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 674 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 675 676 fsc = ceph_inode_to_client(inode); 677 if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) { 678 pr_warning("writepage_start %p on forced umount\n", inode); 679 return -EIO; /* we're in a forced umount, don't write! */ 680 } 681 if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) 682 wsize = fsc->mount_options->wsize; 683 if (wsize < PAGE_CACHE_SIZE) 684 wsize = PAGE_CACHE_SIZE; 685 max_pages_ever = wsize >> PAGE_CACHE_SHIFT; 686 687 pagevec_init(&pvec, 0); 688 689 /* where to start/end? */ 690 if (wbc->range_cyclic) { 691 start = mapping->writeback_index; /* Start from prev offset */ 692 end = -1; 693 dout(" cyclic, start at %lu\n", start); 694 } else { 695 start = wbc->range_start >> PAGE_CACHE_SHIFT; 696 end = wbc->range_end >> PAGE_CACHE_SHIFT; 697 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 698 range_whole = 1; 699 should_loop = 0; 700 dout(" not cyclic, %lu to %lu\n", start, end); 701 } 702 index = start; 703 704 retry: 705 /* find oldest snap context with dirty data */ 706 ceph_put_snap_context(snapc); 707 snapc = get_oldest_context(inode, &snap_size); 708 if (!snapc) { 709 /* hmm, why does writepages get called when there 710 is no dirty data? */ 711 dout(" no snap context with dirty data?\n"); 712 goto out; 713 } 714 dout(" oldest snapc is %p seq %lld (%d snaps)\n", 715 snapc, snapc->seq, snapc->num_snaps); 716 if (last_snapc && snapc != last_snapc) { 717 /* if we switched to a newer snapc, restart our scan at the 718 * start of the original file range. */ 719 dout(" snapc differs from last pass, restarting at %lu\n", 720 index); 721 index = start; 722 } 723 last_snapc = snapc; 724 725 while (!done && index <= end) { 726 unsigned i; 727 int first; 728 pgoff_t next; 729 int pvec_pages, locked_pages; 730 struct page *page; 731 int want; 732 u64 offset, len; 733 long writeback_stat; 734 735 next = 0; 736 locked_pages = 0; 737 max_pages = max_pages_ever; 738 739 get_more_pages: 740 first = -1; 741 want = min(end - index, 742 min((pgoff_t)PAGEVEC_SIZE, 743 max_pages - (pgoff_t)locked_pages) - 1) 744 + 1; 745 pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index, 746 PAGECACHE_TAG_DIRTY, 747 want); 748 dout("pagevec_lookup_tag got %d\n", pvec_pages); 749 if (!pvec_pages && !locked_pages) 750 break; 751 for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { 752 page = pvec.pages[i]; 753 dout("? %p idx %lu\n", page, page->index); 754 if (locked_pages == 0) 755 lock_page(page); /* first page */ 756 else if (!trylock_page(page)) 757 break; 758 759 /* only dirty pages, or our accounting breaks */ 760 if (unlikely(!PageDirty(page)) || 761 unlikely(page->mapping != mapping)) { 762 dout("!dirty or !mapping %p\n", page); 763 unlock_page(page); 764 break; 765 } 766 if (!wbc->range_cyclic && page->index > end) { 767 dout("end of range %p\n", page); 768 done = 1; 769 unlock_page(page); 770 break; 771 } 772 if (next && (page->index != next)) { 773 dout("not consecutive %p\n", page); 774 unlock_page(page); 775 break; 776 } 777 if (wbc->sync_mode != WB_SYNC_NONE) { 778 dout("waiting on writeback %p\n", page); 779 wait_on_page_writeback(page); 780 } 781 if ((snap_size && page_offset(page) > snap_size) || 782 (!snap_size && 783 page_offset(page) > i_size_read(inode))) { 784 dout("%p page eof %llu\n", page, snap_size ? 785 snap_size : i_size_read(inode)); 786 done = 1; 787 unlock_page(page); 788 break; 789 } 790 if (PageWriteback(page)) { 791 dout("%p under writeback\n", page); 792 unlock_page(page); 793 break; 794 } 795 796 /* only if matching snap context */ 797 pgsnapc = page_snap_context(page); 798 if (pgsnapc->seq > snapc->seq) { 799 dout("page snapc %p %lld > oldest %p %lld\n", 800 pgsnapc, pgsnapc->seq, snapc, snapc->seq); 801 unlock_page(page); 802 if (!locked_pages) 803 continue; /* keep looking for snap */ 804 break; 805 } 806 807 if (!clear_page_dirty_for_io(page)) { 808 dout("%p !clear_page_dirty_for_io\n", page); 809 unlock_page(page); 810 break; 811 } 812 813 /* ok */ 814 if (locked_pages == 0) { 815 /* prepare async write request */ 816 offset = (u64) page_offset(page); 817 len = wsize; 818 req = ceph_osdc_new_request(&fsc->client->osdc, 819 &ci->i_layout, 820 ceph_vino(inode), 821 offset, &len, 822 CEPH_OSD_OP_WRITE, 823 CEPH_OSD_FLAG_WRITE | 824 CEPH_OSD_FLAG_ONDISK, 825 snapc, do_sync, 826 ci->i_truncate_seq, 827 ci->i_truncate_size, 828 &inode->i_mtime, true); 829 830 if (IS_ERR(req)) { 831 rc = PTR_ERR(req); 832 unlock_page(page); 833 break; 834 } 835 836 req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; 837 req->r_data.num_pages = calc_pages_for(0, len); 838 req->r_data.alignment = 0; 839 max_pages = req->r_data.num_pages; 840 841 alloc_page_vec(fsc, req); 842 req->r_callback = writepages_finish; 843 req->r_inode = inode; 844 } 845 846 /* note position of first page in pvec */ 847 if (first < 0) 848 first = i; 849 dout("%p will write page %p idx %lu\n", 850 inode, page, page->index); 851 852 writeback_stat = 853 atomic_long_inc_return(&fsc->writeback_count); 854 if (writeback_stat > CONGESTION_ON_THRESH( 855 fsc->mount_options->congestion_kb)) { 856 set_bdi_congested(&fsc->backing_dev_info, 857 BLK_RW_ASYNC); 858 } 859 860 set_page_writeback(page); 861 req->r_data.pages[locked_pages] = page; 862 locked_pages++; 863 next = page->index + 1; 864 } 865 866 /* did we get anything? */ 867 if (!locked_pages) 868 goto release_pvec_pages; 869 if (i) { 870 int j; 871 BUG_ON(!locked_pages || first < 0); 872 873 if (pvec_pages && i == pvec_pages && 874 locked_pages < max_pages) { 875 dout("reached end pvec, trying for more\n"); 876 pagevec_reinit(&pvec); 877 goto get_more_pages; 878 } 879 880 /* shift unused pages over in the pvec... we 881 * will need to release them below. */ 882 for (j = i; j < pvec_pages; j++) { 883 dout(" pvec leftover page %p\n", 884 pvec.pages[j]); 885 pvec.pages[j-i+first] = pvec.pages[j]; 886 } 887 pvec.nr -= i-first; 888 } 889 890 /* submit the write */ 891 offset = req->r_data.pages[0]->index << PAGE_CACHE_SHIFT; 892 len = min((snap_size ? snap_size : i_size_read(inode)) - offset, 893 (u64)locked_pages << PAGE_CACHE_SHIFT); 894 dout("writepages got %d pages at %llu~%llu\n", 895 locked_pages, offset, len); 896 897 /* revise final length, page count */ 898 req->r_data.num_pages = locked_pages; 899 req->r_request_ops[0].extent.length = cpu_to_le64(len); 900 req->r_request_ops[0].payload_len = cpu_to_le32(len); 901 req->r_request->hdr.data_len = cpu_to_le32(len); 902 903 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 904 BUG_ON(rc); 905 req = NULL; 906 907 /* continue? */ 908 index = next; 909 wbc->nr_to_write -= locked_pages; 910 if (wbc->nr_to_write <= 0) 911 done = 1; 912 913 release_pvec_pages: 914 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, 915 pvec.nr ? pvec.pages[0] : NULL); 916 pagevec_release(&pvec); 917 918 if (locked_pages && !done) 919 goto retry; 920 } 921 922 if (should_loop && !done) { 923 /* more to do; loop back to beginning of file */ 924 dout("writepages looping back to beginning of file\n"); 925 should_loop = 0; 926 index = 0; 927 goto retry; 928 } 929 930 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 931 mapping->writeback_index = index; 932 933 out: 934 if (req) 935 ceph_osdc_put_request(req); 936 ceph_put_snap_context(snapc); 937 dout("writepages done, rc = %d\n", rc); 938 return rc; 939 } 940 941 942 943 /* 944 * See if a given @snapc is either writeable, or already written. 945 */ 946 static int context_is_writeable_or_written(struct inode *inode, 947 struct ceph_snap_context *snapc) 948 { 949 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); 950 int ret = !oldest || snapc->seq <= oldest->seq; 951 952 ceph_put_snap_context(oldest); 953 return ret; 954 } 955 956 /* 957 * We are only allowed to write into/dirty the page if the page is 958 * clean, or already dirty within the same snap context. 959 * 960 * called with page locked. 961 * return success with page locked, 962 * or any failure (incl -EAGAIN) with page unlocked. 963 */ 964 static int ceph_update_writeable_page(struct file *file, 965 loff_t pos, unsigned len, 966 struct page *page) 967 { 968 struct inode *inode = file_inode(file); 969 struct ceph_inode_info *ci = ceph_inode(inode); 970 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 971 loff_t page_off = pos & PAGE_CACHE_MASK; 972 int pos_in_page = pos & ~PAGE_CACHE_MASK; 973 int end_in_page = pos_in_page + len; 974 loff_t i_size; 975 int r; 976 struct ceph_snap_context *snapc, *oldest; 977 978 retry_locked: 979 /* writepages currently holds page lock, but if we change that later, */ 980 wait_on_page_writeback(page); 981 982 /* check snap context */ 983 BUG_ON(!ci->i_snap_realm); 984 down_read(&mdsc->snap_rwsem); 985 BUG_ON(!ci->i_snap_realm->cached_context); 986 snapc = page_snap_context(page); 987 if (snapc && snapc != ci->i_head_snapc) { 988 /* 989 * this page is already dirty in another (older) snap 990 * context! is it writeable now? 991 */ 992 oldest = get_oldest_context(inode, NULL); 993 up_read(&mdsc->snap_rwsem); 994 995 if (snapc->seq > oldest->seq) { 996 ceph_put_snap_context(oldest); 997 dout(" page %p snapc %p not current or oldest\n", 998 page, snapc); 999 /* 1000 * queue for writeback, and wait for snapc to 1001 * be writeable or written 1002 */ 1003 snapc = ceph_get_snap_context(snapc); 1004 unlock_page(page); 1005 ceph_queue_writeback(inode); 1006 r = wait_event_interruptible(ci->i_cap_wq, 1007 context_is_writeable_or_written(inode, snapc)); 1008 ceph_put_snap_context(snapc); 1009 if (r == -ERESTARTSYS) 1010 return r; 1011 return -EAGAIN; 1012 } 1013 ceph_put_snap_context(oldest); 1014 1015 /* yay, writeable, do it now (without dropping page lock) */ 1016 dout(" page %p snapc %p not current, but oldest\n", 1017 page, snapc); 1018 if (!clear_page_dirty_for_io(page)) 1019 goto retry_locked; 1020 r = writepage_nounlock(page, NULL); 1021 if (r < 0) 1022 goto fail_nosnap; 1023 goto retry_locked; 1024 } 1025 1026 if (PageUptodate(page)) { 1027 dout(" page %p already uptodate\n", page); 1028 return 0; 1029 } 1030 1031 /* full page? */ 1032 if (pos_in_page == 0 && len == PAGE_CACHE_SIZE) 1033 return 0; 1034 1035 /* past end of file? */ 1036 i_size = inode->i_size; /* caller holds i_mutex */ 1037 1038 if (i_size + len > inode->i_sb->s_maxbytes) { 1039 /* file is too big */ 1040 r = -EINVAL; 1041 goto fail; 1042 } 1043 1044 if (page_off >= i_size || 1045 (pos_in_page == 0 && (pos+len) >= i_size && 1046 end_in_page - pos_in_page != PAGE_CACHE_SIZE)) { 1047 dout(" zeroing %p 0 - %d and %d - %d\n", 1048 page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); 1049 zero_user_segments(page, 1050 0, pos_in_page, 1051 end_in_page, PAGE_CACHE_SIZE); 1052 return 0; 1053 } 1054 1055 /* we need to read it. */ 1056 up_read(&mdsc->snap_rwsem); 1057 r = readpage_nounlock(file, page); 1058 if (r < 0) 1059 goto fail_nosnap; 1060 goto retry_locked; 1061 1062 fail: 1063 up_read(&mdsc->snap_rwsem); 1064 fail_nosnap: 1065 unlock_page(page); 1066 return r; 1067 } 1068 1069 /* 1070 * We are only allowed to write into/dirty the page if the page is 1071 * clean, or already dirty within the same snap context. 1072 */ 1073 static int ceph_write_begin(struct file *file, struct address_space *mapping, 1074 loff_t pos, unsigned len, unsigned flags, 1075 struct page **pagep, void **fsdata) 1076 { 1077 struct inode *inode = file_inode(file); 1078 struct page *page; 1079 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1080 int r; 1081 1082 do { 1083 /* get a page */ 1084 page = grab_cache_page_write_begin(mapping, index, 0); 1085 if (!page) 1086 return -ENOMEM; 1087 *pagep = page; 1088 1089 dout("write_begin file %p inode %p page %p %d~%d\n", file, 1090 inode, page, (int)pos, (int)len); 1091 1092 r = ceph_update_writeable_page(file, pos, len, page); 1093 } while (r == -EAGAIN); 1094 1095 return r; 1096 } 1097 1098 /* 1099 * we don't do anything in here that simple_write_end doesn't do 1100 * except adjust dirty page accounting and drop read lock on 1101 * mdsc->snap_rwsem. 1102 */ 1103 static int ceph_write_end(struct file *file, struct address_space *mapping, 1104 loff_t pos, unsigned len, unsigned copied, 1105 struct page *page, void *fsdata) 1106 { 1107 struct inode *inode = file_inode(file); 1108 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1109 struct ceph_mds_client *mdsc = fsc->mdsc; 1110 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1111 int check_cap = 0; 1112 1113 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, 1114 inode, page, (int)pos, (int)copied, (int)len); 1115 1116 /* zero the stale part of the page if we did a short copy */ 1117 if (copied < len) 1118 zero_user_segment(page, from+copied, len); 1119 1120 /* did file size increase? */ 1121 /* (no need for i_size_read(); we caller holds i_mutex */ 1122 if (pos+copied > inode->i_size) 1123 check_cap = ceph_inode_set_size(inode, pos+copied); 1124 1125 if (!PageUptodate(page)) 1126 SetPageUptodate(page); 1127 1128 set_page_dirty(page); 1129 1130 unlock_page(page); 1131 up_read(&mdsc->snap_rwsem); 1132 page_cache_release(page); 1133 1134 if (check_cap) 1135 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 1136 1137 return copied; 1138 } 1139 1140 /* 1141 * we set .direct_IO to indicate direct io is supported, but since we 1142 * intercept O_DIRECT reads and writes early, this function should 1143 * never get called. 1144 */ 1145 static ssize_t ceph_direct_io(int rw, struct kiocb *iocb, 1146 const struct iovec *iov, 1147 loff_t pos, unsigned long nr_segs) 1148 { 1149 WARN_ON(1); 1150 return -EINVAL; 1151 } 1152 1153 const struct address_space_operations ceph_aops = { 1154 .readpage = ceph_readpage, 1155 .readpages = ceph_readpages, 1156 .writepage = ceph_writepage, 1157 .writepages = ceph_writepages_start, 1158 .write_begin = ceph_write_begin, 1159 .write_end = ceph_write_end, 1160 .set_page_dirty = ceph_set_page_dirty, 1161 .invalidatepage = ceph_invalidatepage, 1162 .releasepage = ceph_releasepage, 1163 .direct_IO = ceph_direct_io, 1164 }; 1165 1166 1167 /* 1168 * vm ops 1169 */ 1170 1171 /* 1172 * Reuse write_begin here for simplicity. 1173 */ 1174 static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 1175 { 1176 struct inode *inode = file_inode(vma->vm_file); 1177 struct page *page = vmf->page; 1178 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 1179 loff_t off = page_offset(page); 1180 loff_t size, len; 1181 int ret; 1182 1183 /* Update time before taking page lock */ 1184 file_update_time(vma->vm_file); 1185 1186 size = i_size_read(inode); 1187 if (off + PAGE_CACHE_SIZE <= size) 1188 len = PAGE_CACHE_SIZE; 1189 else 1190 len = size & ~PAGE_CACHE_MASK; 1191 1192 dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode, 1193 off, len, page, page->index); 1194 1195 lock_page(page); 1196 1197 ret = VM_FAULT_NOPAGE; 1198 if ((off > size) || 1199 (page->mapping != inode->i_mapping)) 1200 goto out; 1201 1202 ret = ceph_update_writeable_page(vma->vm_file, off, len, page); 1203 if (ret == 0) { 1204 /* success. we'll keep the page locked. */ 1205 set_page_dirty(page); 1206 up_read(&mdsc->snap_rwsem); 1207 ret = VM_FAULT_LOCKED; 1208 } else { 1209 if (ret == -ENOMEM) 1210 ret = VM_FAULT_OOM; 1211 else 1212 ret = VM_FAULT_SIGBUS; 1213 } 1214 out: 1215 dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret); 1216 if (ret != VM_FAULT_LOCKED) 1217 unlock_page(page); 1218 return ret; 1219 } 1220 1221 static struct vm_operations_struct ceph_vmops = { 1222 .fault = filemap_fault, 1223 .page_mkwrite = ceph_page_mkwrite, 1224 .remap_pages = generic_file_remap_pages, 1225 }; 1226 1227 int ceph_mmap(struct file *file, struct vm_area_struct *vma) 1228 { 1229 struct address_space *mapping = file->f_mapping; 1230 1231 if (!mapping->a_ops->readpage) 1232 return -ENOEXEC; 1233 file_accessed(file); 1234 vma->vm_ops = &ceph_vmops; 1235 return 0; 1236 } 1237