1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/backing-dev.h> 4 #include <linux/fs.h> 5 #include <linux/mm.h> 6 #include <linux/pagemap.h> 7 #include <linux/writeback.h> /* generic_writepages */ 8 #include <linux/slab.h> 9 #include <linux/pagevec.h> 10 #include <linux/task_io_accounting_ops.h> 11 12 #include "super.h" 13 #include "mds_client.h" 14 #include <linux/ceph/osd_client.h> 15 16 /* 17 * Ceph address space ops. 18 * 19 * There are a few funny things going on here. 20 * 21 * The page->private field is used to reference a struct 22 * ceph_snap_context for _every_ dirty page. This indicates which 23 * snapshot the page was logically dirtied in, and thus which snap 24 * context needs to be associated with the osd write during writeback. 25 * 26 * Similarly, struct ceph_inode_info maintains a set of counters to 27 * count dirty pages on the inode. In the absence of snapshots, 28 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. 29 * 30 * When a snapshot is taken (that is, when the client receives 31 * notification that a snapshot was taken), each inode with caps and 32 * with dirty pages (dirty pages implies there is a cap) gets a new 33 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending 34 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is 35 * moved to capsnap->dirty. (Unless a sync write is currently in 36 * progress. In that case, the capsnap is said to be "pending", new 37 * writes cannot start, and the capsnap isn't "finalized" until the 38 * write completes (or fails) and a final size/mtime for the inode for 39 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. 40 * 41 * On writeback, we must submit writes to the osd IN SNAP ORDER. So, 42 * we look for the first capsnap in i_cap_snaps and write out pages in 43 * that snap context _only_. Then we move on to the next capsnap, 44 * eventually reaching the "live" or "head" context (i.e., pages that 45 * are not yet snapped) and are writing the most recently dirtied 46 * pages. 47 * 48 * Invalidate and so forth must take care to ensure the dirty page 49 * accounting is preserved. 50 */ 51 52 #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) 53 #define CONGESTION_OFF_THRESH(congestion_kb) \ 54 (CONGESTION_ON_THRESH(congestion_kb) - \ 55 (CONGESTION_ON_THRESH(congestion_kb) >> 2)) 56 57 static inline struct ceph_snap_context *page_snap_context(struct page *page) 58 { 59 if (PagePrivate(page)) 60 return (void *)page->private; 61 return NULL; 62 } 63 64 /* 65 * Dirty a page. Optimistically adjust accounting, on the assumption 66 * that we won't race with invalidate. If we do, readjust. 67 */ 68 static int ceph_set_page_dirty(struct page *page) 69 { 70 struct address_space *mapping = page->mapping; 71 struct inode *inode; 72 struct ceph_inode_info *ci; 73 int undo = 0; 74 struct ceph_snap_context *snapc; 75 76 if (unlikely(!mapping)) 77 return !TestSetPageDirty(page); 78 79 if (TestSetPageDirty(page)) { 80 dout("%p set_page_dirty %p idx %lu -- already dirty\n", 81 mapping->host, page, page->index); 82 return 0; 83 } 84 85 inode = mapping->host; 86 ci = ceph_inode(inode); 87 88 /* 89 * Note that we're grabbing a snapc ref here without holding 90 * any locks! 91 */ 92 snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); 93 94 /* dirty the head */ 95 spin_lock(&ci->i_ceph_lock); 96 if (ci->i_head_snapc == NULL) 97 ci->i_head_snapc = ceph_get_snap_context(snapc); 98 ++ci->i_wrbuffer_ref_head; 99 if (ci->i_wrbuffer_ref == 0) 100 ihold(inode); 101 ++ci->i_wrbuffer_ref; 102 dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " 103 "snapc %p seq %lld (%d snaps)\n", 104 mapping->host, page, page->index, 105 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 106 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 107 snapc, snapc->seq, snapc->num_snaps); 108 spin_unlock(&ci->i_ceph_lock); 109 110 /* now adjust page */ 111 spin_lock_irq(&mapping->tree_lock); 112 if (page->mapping) { /* Race with truncate? */ 113 WARN_ON_ONCE(!PageUptodate(page)); 114 account_page_dirtied(page, page->mapping); 115 radix_tree_tag_set(&mapping->page_tree, 116 page_index(page), PAGECACHE_TAG_DIRTY); 117 118 /* 119 * Reference snap context in page->private. Also set 120 * PagePrivate so that we get invalidatepage callback. 121 */ 122 page->private = (unsigned long)snapc; 123 SetPagePrivate(page); 124 } else { 125 dout("ANON set_page_dirty %p (raced truncate?)\n", page); 126 undo = 1; 127 } 128 129 spin_unlock_irq(&mapping->tree_lock); 130 131 if (undo) 132 /* whoops, we failed to dirty the page */ 133 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 134 135 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 136 137 BUG_ON(!PageDirty(page)); 138 return 1; 139 } 140 141 /* 142 * If we are truncating the full page (i.e. offset == 0), adjust the 143 * dirty page counters appropriately. Only called if there is private 144 * data on the page. 145 */ 146 static void ceph_invalidatepage(struct page *page, unsigned long offset) 147 { 148 struct inode *inode; 149 struct ceph_inode_info *ci; 150 struct ceph_snap_context *snapc = page_snap_context(page); 151 152 BUG_ON(!PageLocked(page)); 153 BUG_ON(!PagePrivate(page)); 154 BUG_ON(!page->mapping); 155 156 inode = page->mapping->host; 157 158 /* 159 * We can get non-dirty pages here due to races between 160 * set_page_dirty and truncate_complete_page; just spit out a 161 * warning, in case we end up with accounting problems later. 162 */ 163 if (!PageDirty(page)) 164 pr_err("%p invalidatepage %p page not dirty\n", inode, page); 165 166 if (offset == 0) 167 ClearPageChecked(page); 168 169 ci = ceph_inode(inode); 170 if (offset == 0) { 171 dout("%p invalidatepage %p idx %lu full dirty page %lu\n", 172 inode, page, page->index, offset); 173 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 174 ceph_put_snap_context(snapc); 175 page->private = 0; 176 ClearPagePrivate(page); 177 } else { 178 dout("%p invalidatepage %p idx %lu partial dirty page\n", 179 inode, page, page->index); 180 } 181 } 182 183 /* just a sanity check */ 184 static int ceph_releasepage(struct page *page, gfp_t g) 185 { 186 struct inode *inode = page->mapping ? page->mapping->host : NULL; 187 dout("%p releasepage %p idx %lu\n", inode, page, page->index); 188 WARN_ON(PageDirty(page)); 189 WARN_ON(PagePrivate(page)); 190 return 0; 191 } 192 193 /* 194 * read a single page, without unlocking it. 195 */ 196 static int readpage_nounlock(struct file *filp, struct page *page) 197 { 198 struct inode *inode = file_inode(filp); 199 struct ceph_inode_info *ci = ceph_inode(inode); 200 struct ceph_osd_client *osdc = 201 &ceph_inode_to_client(inode)->client->osdc; 202 int err = 0; 203 u64 len = PAGE_CACHE_SIZE; 204 205 dout("readpage inode %p file %p page %p index %lu\n", 206 inode, filp, page, page->index); 207 err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, 208 (u64) page_offset(page), &len, 209 ci->i_truncate_seq, ci->i_truncate_size, 210 &page, 1, 0); 211 if (err == -ENOENT) 212 err = 0; 213 if (err < 0) { 214 SetPageError(page); 215 goto out; 216 } else if (err < PAGE_CACHE_SIZE) { 217 /* zero fill remainder of page */ 218 zero_user_segment(page, err, PAGE_CACHE_SIZE); 219 } 220 SetPageUptodate(page); 221 222 out: 223 return err < 0 ? err : 0; 224 } 225 226 static int ceph_readpage(struct file *filp, struct page *page) 227 { 228 int r = readpage_nounlock(filp, page); 229 unlock_page(page); 230 return r; 231 } 232 233 /* 234 * Finish an async read(ahead) op. 235 */ 236 static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) 237 { 238 struct inode *inode = req->r_inode; 239 int rc = req->r_result; 240 int bytes = le32_to_cpu(msg->hdr.data_len); 241 int i; 242 243 dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); 244 245 /* unlock all pages, zeroing any data we didn't read */ 246 BUG_ON(req->r_data_in.type != CEPH_OSD_DATA_TYPE_PAGES); 247 for (i = 0; i < req->r_data_in.num_pages; i++) { 248 struct page *page = req->r_data_in.pages[i]; 249 250 if (bytes < (int)PAGE_CACHE_SIZE) { 251 /* zero (remainder of) page */ 252 int s = bytes < 0 ? 0 : bytes; 253 zero_user_segment(page, s, PAGE_CACHE_SIZE); 254 } 255 dout("finish_read %p uptodate %p idx %lu\n", inode, page, 256 page->index); 257 flush_dcache_page(page); 258 SetPageUptodate(page); 259 unlock_page(page); 260 page_cache_release(page); 261 bytes -= PAGE_CACHE_SIZE; 262 } 263 kfree(req->r_data_in.pages); 264 } 265 266 static void ceph_unlock_page_vector(struct page **pages, int num_pages) 267 { 268 int i; 269 270 for (i = 0; i < num_pages; i++) 271 unlock_page(pages[i]); 272 } 273 274 /* 275 * start an async read(ahead) operation. return nr_pages we submitted 276 * a read for on success, or negative error code. 277 */ 278 static int start_read(struct inode *inode, struct list_head *page_list, int max) 279 { 280 struct ceph_osd_client *osdc = 281 &ceph_inode_to_client(inode)->client->osdc; 282 struct ceph_inode_info *ci = ceph_inode(inode); 283 struct page *page = list_entry(page_list->prev, struct page, lru); 284 struct ceph_osd_request *req; 285 u64 off; 286 u64 len; 287 int i; 288 struct page **pages; 289 pgoff_t next_index; 290 int nr_pages = 0; 291 int ret; 292 293 off = (u64) page_offset(page); 294 295 /* count pages */ 296 next_index = page->index; 297 list_for_each_entry_reverse(page, page_list, lru) { 298 if (page->index != next_index) 299 break; 300 nr_pages++; 301 next_index++; 302 if (max && nr_pages == max) 303 break; 304 } 305 len = nr_pages << PAGE_CACHE_SHIFT; 306 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, 307 off, len); 308 309 req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), 310 off, &len, 311 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 312 NULL, 0, 313 ci->i_truncate_seq, ci->i_truncate_size, 314 NULL, false); 315 if (IS_ERR(req)) 316 return PTR_ERR(req); 317 318 /* build page vector */ 319 nr_pages = calc_pages_for(0, len); 320 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); 321 ret = -ENOMEM; 322 if (!pages) 323 goto out; 324 for (i = 0; i < nr_pages; ++i) { 325 page = list_entry(page_list->prev, struct page, lru); 326 BUG_ON(PageLocked(page)); 327 list_del(&page->lru); 328 329 dout("start_read %p adding %p idx %lu\n", inode, page, 330 page->index); 331 if (add_to_page_cache_lru(page, &inode->i_data, page->index, 332 GFP_NOFS)) { 333 page_cache_release(page); 334 dout("start_read %p add_to_page_cache failed %p\n", 335 inode, page); 336 nr_pages = i; 337 goto out_pages; 338 } 339 pages[i] = page; 340 } 341 req->r_data_in.type = CEPH_OSD_DATA_TYPE_PAGES; 342 req->r_data_in.pages = pages; 343 req->r_data_in.num_pages = nr_pages; 344 req->r_data_in.alignment = 0; 345 req->r_callback = finish_read; 346 req->r_inode = inode; 347 348 dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); 349 ret = ceph_osdc_start_request(osdc, req, false); 350 if (ret < 0) 351 goto out_pages; 352 ceph_osdc_put_request(req); 353 return nr_pages; 354 355 out_pages: 356 ceph_unlock_page_vector(pages, nr_pages); 357 ceph_release_page_vector(pages, nr_pages); 358 out: 359 ceph_osdc_put_request(req); 360 return ret; 361 } 362 363 364 /* 365 * Read multiple pages. Leave pages we don't read + unlock in page_list; 366 * the caller (VM) cleans them up. 367 */ 368 static int ceph_readpages(struct file *file, struct address_space *mapping, 369 struct list_head *page_list, unsigned nr_pages) 370 { 371 struct inode *inode = file_inode(file); 372 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 373 int rc = 0; 374 int max = 0; 375 376 if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE) 377 max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) 378 >> PAGE_SHIFT; 379 380 dout("readpages %p file %p nr_pages %d max %d\n", inode, 381 file, nr_pages, 382 max); 383 while (!list_empty(page_list)) { 384 rc = start_read(inode, page_list, max); 385 if (rc < 0) 386 goto out; 387 BUG_ON(rc == 0); 388 } 389 out: 390 dout("readpages %p file %p ret %d\n", inode, file, rc); 391 return rc; 392 } 393 394 /* 395 * Get ref for the oldest snapc for an inode with dirty data... that is, the 396 * only snap context we are allowed to write back. 397 */ 398 static struct ceph_snap_context *get_oldest_context(struct inode *inode, 399 u64 *snap_size) 400 { 401 struct ceph_inode_info *ci = ceph_inode(inode); 402 struct ceph_snap_context *snapc = NULL; 403 struct ceph_cap_snap *capsnap = NULL; 404 405 spin_lock(&ci->i_ceph_lock); 406 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 407 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 408 capsnap->context, capsnap->dirty_pages); 409 if (capsnap->dirty_pages) { 410 snapc = ceph_get_snap_context(capsnap->context); 411 if (snap_size) 412 *snap_size = capsnap->size; 413 break; 414 } 415 } 416 if (!snapc && ci->i_wrbuffer_ref_head) { 417 snapc = ceph_get_snap_context(ci->i_head_snapc); 418 dout(" head snapc %p has %d dirty pages\n", 419 snapc, ci->i_wrbuffer_ref_head); 420 } 421 spin_unlock(&ci->i_ceph_lock); 422 return snapc; 423 } 424 425 /* 426 * Write a single page, but leave the page locked. 427 * 428 * If we get a write error, set the page error bit, but still adjust the 429 * dirty page accounting (i.e., page is no longer dirty). 430 */ 431 static int writepage_nounlock(struct page *page, struct writeback_control *wbc) 432 { 433 struct inode *inode; 434 struct ceph_inode_info *ci; 435 struct ceph_fs_client *fsc; 436 struct ceph_osd_client *osdc; 437 loff_t page_off = page_offset(page); 438 int len = PAGE_CACHE_SIZE; 439 loff_t i_size; 440 int err = 0; 441 struct ceph_snap_context *snapc, *oldest; 442 u64 snap_size = 0; 443 long writeback_stat; 444 445 dout("writepage %p idx %lu\n", page, page->index); 446 447 if (!page->mapping || !page->mapping->host) { 448 dout("writepage %p - no mapping\n", page); 449 return -EFAULT; 450 } 451 inode = page->mapping->host; 452 ci = ceph_inode(inode); 453 fsc = ceph_inode_to_client(inode); 454 osdc = &fsc->client->osdc; 455 456 /* verify this is a writeable snap context */ 457 snapc = page_snap_context(page); 458 if (snapc == NULL) { 459 dout("writepage %p page %p not dirty?\n", inode, page); 460 goto out; 461 } 462 oldest = get_oldest_context(inode, &snap_size); 463 if (snapc->seq > oldest->seq) { 464 dout("writepage %p page %p snapc %p not writeable - noop\n", 465 inode, page, snapc); 466 /* we should only noop if called by kswapd */ 467 WARN_ON((current->flags & PF_MEMALLOC) == 0); 468 ceph_put_snap_context(oldest); 469 goto out; 470 } 471 ceph_put_snap_context(oldest); 472 473 /* is this a partial page at end of file? */ 474 if (snap_size) 475 i_size = snap_size; 476 else 477 i_size = i_size_read(inode); 478 if (i_size < page_off + len) 479 len = i_size - page_off; 480 481 dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", 482 inode, page, page->index, page_off, len, snapc); 483 484 writeback_stat = atomic_long_inc_return(&fsc->writeback_count); 485 if (writeback_stat > 486 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) 487 set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); 488 489 set_page_writeback(page); 490 err = ceph_osdc_writepages(osdc, ceph_vino(inode), 491 &ci->i_layout, snapc, 492 page_off, len, 493 ci->i_truncate_seq, ci->i_truncate_size, 494 &inode->i_mtime, &page, 1); 495 if (err < 0) { 496 dout("writepage setting page/mapping error %d %p\n", err, page); 497 SetPageError(page); 498 mapping_set_error(&inode->i_data, err); 499 if (wbc) 500 wbc->pages_skipped++; 501 } else { 502 dout("writepage cleaned page %p\n", page); 503 err = 0; /* vfs expects us to return 0 */ 504 } 505 page->private = 0; 506 ClearPagePrivate(page); 507 end_page_writeback(page); 508 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 509 ceph_put_snap_context(snapc); /* page's reference */ 510 out: 511 return err; 512 } 513 514 static int ceph_writepage(struct page *page, struct writeback_control *wbc) 515 { 516 int err; 517 struct inode *inode = page->mapping->host; 518 BUG_ON(!inode); 519 ihold(inode); 520 err = writepage_nounlock(page, wbc); 521 unlock_page(page); 522 iput(inode); 523 return err; 524 } 525 526 527 /* 528 * lame release_pages helper. release_pages() isn't exported to 529 * modules. 530 */ 531 static void ceph_release_pages(struct page **pages, int num) 532 { 533 struct pagevec pvec; 534 int i; 535 536 pagevec_init(&pvec, 0); 537 for (i = 0; i < num; i++) { 538 if (pagevec_add(&pvec, pages[i]) == 0) 539 pagevec_release(&pvec); 540 } 541 pagevec_release(&pvec); 542 } 543 544 545 /* 546 * async writeback completion handler. 547 * 548 * If we get an error, set the mapping error bit, but not the individual 549 * page error bits. 550 */ 551 static void writepages_finish(struct ceph_osd_request *req, 552 struct ceph_msg *msg) 553 { 554 struct inode *inode = req->r_inode; 555 struct ceph_inode_info *ci = ceph_inode(inode); 556 unsigned wrote; 557 struct page *page; 558 int i; 559 struct ceph_snap_context *snapc = req->r_snapc; 560 struct address_space *mapping = inode->i_mapping; 561 int rc = req->r_result; 562 u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length); 563 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 564 long writeback_stat; 565 unsigned issued = ceph_caps_issued(ci); 566 567 BUG_ON(req->r_data_out.type != CEPH_OSD_DATA_TYPE_PAGES); 568 if (rc >= 0) { 569 /* 570 * Assume we wrote the pages we originally sent. The 571 * osd might reply with fewer pages if our writeback 572 * raced with a truncation and was adjusted at the osd, 573 * so don't believe the reply. 574 */ 575 wrote = req->r_data_out.num_pages; 576 } else { 577 wrote = 0; 578 mapping_set_error(mapping, rc); 579 } 580 dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n", 581 inode, rc, bytes, wrote); 582 583 /* clean all pages */ 584 for (i = 0; i < req->r_data_out.num_pages; i++) { 585 page = req->r_data_out.pages[i]; 586 BUG_ON(!page); 587 WARN_ON(!PageUptodate(page)); 588 589 writeback_stat = 590 atomic_long_dec_return(&fsc->writeback_count); 591 if (writeback_stat < 592 CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) 593 clear_bdi_congested(&fsc->backing_dev_info, 594 BLK_RW_ASYNC); 595 596 ceph_put_snap_context(page_snap_context(page)); 597 page->private = 0; 598 ClearPagePrivate(page); 599 dout("unlocking %d %p\n", i, page); 600 end_page_writeback(page); 601 602 /* 603 * We lost the cache cap, need to truncate the page before 604 * it is unlocked, otherwise we'd truncate it later in the 605 * page truncation thread, possibly losing some data that 606 * raced its way in 607 */ 608 if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) 609 generic_error_remove_page(inode->i_mapping, page); 610 611 unlock_page(page); 612 } 613 dout("%p wrote+cleaned %d pages\n", inode, wrote); 614 ceph_put_wrbuffer_cap_refs(ci, req->r_data_out.num_pages, snapc); 615 616 ceph_release_pages(req->r_data_out.pages, req->r_data_out.num_pages); 617 if (req->r_data_out.pages_from_pool) 618 mempool_free(req->r_data_out.pages, 619 ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); 620 else 621 kfree(req->r_data_out.pages); 622 ceph_osdc_put_request(req); 623 } 624 625 /* 626 * allocate a page vec, either directly, or if necessary, via a the 627 * mempool. we avoid the mempool if we can because req->r_data_out.num_pages 628 * may be less than the maximum write size. 629 */ 630 static void alloc_page_vec(struct ceph_fs_client *fsc, 631 struct ceph_osd_request *req) 632 { 633 size_t size; 634 635 size = sizeof (struct page *) * req->r_data_out.num_pages; 636 req->r_data_out.pages = kmalloc(size, GFP_NOFS); 637 if (!req->r_data_out.pages) { 638 req->r_data_out.pages = mempool_alloc(fsc->wb_pagevec_pool, 639 GFP_NOFS); 640 req->r_data_out.pages_from_pool = 1; 641 WARN_ON(!req->r_data_out.pages); 642 } 643 } 644 645 /* 646 * initiate async writeback 647 */ 648 static int ceph_writepages_start(struct address_space *mapping, 649 struct writeback_control *wbc) 650 { 651 struct inode *inode = mapping->host; 652 struct ceph_inode_info *ci = ceph_inode(inode); 653 struct ceph_fs_client *fsc; 654 pgoff_t index, start, end; 655 int range_whole = 0; 656 int should_loop = 1; 657 pgoff_t max_pages = 0, max_pages_ever = 0; 658 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; 659 struct pagevec pvec; 660 int done = 0; 661 int rc = 0; 662 unsigned wsize = 1 << inode->i_blkbits; 663 struct ceph_osd_request *req = NULL; 664 int do_sync; 665 u64 snap_size = 0; 666 667 /* 668 * Include a 'sync' in the OSD request if this is a data 669 * integrity write (e.g., O_SYNC write or fsync()), or if our 670 * cap is being revoked. 671 */ 672 do_sync = wbc->sync_mode == WB_SYNC_ALL; 673 if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER)) 674 do_sync = 1; 675 dout("writepages_start %p dosync=%d (mode=%s)\n", 676 inode, do_sync, 677 wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 678 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 679 680 fsc = ceph_inode_to_client(inode); 681 if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) { 682 pr_warning("writepage_start %p on forced umount\n", inode); 683 return -EIO; /* we're in a forced umount, don't write! */ 684 } 685 if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) 686 wsize = fsc->mount_options->wsize; 687 if (wsize < PAGE_CACHE_SIZE) 688 wsize = PAGE_CACHE_SIZE; 689 max_pages_ever = wsize >> PAGE_CACHE_SHIFT; 690 691 pagevec_init(&pvec, 0); 692 693 /* where to start/end? */ 694 if (wbc->range_cyclic) { 695 start = mapping->writeback_index; /* Start from prev offset */ 696 end = -1; 697 dout(" cyclic, start at %lu\n", start); 698 } else { 699 start = wbc->range_start >> PAGE_CACHE_SHIFT; 700 end = wbc->range_end >> PAGE_CACHE_SHIFT; 701 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 702 range_whole = 1; 703 should_loop = 0; 704 dout(" not cyclic, %lu to %lu\n", start, end); 705 } 706 index = start; 707 708 retry: 709 /* find oldest snap context with dirty data */ 710 ceph_put_snap_context(snapc); 711 snapc = get_oldest_context(inode, &snap_size); 712 if (!snapc) { 713 /* hmm, why does writepages get called when there 714 is no dirty data? */ 715 dout(" no snap context with dirty data?\n"); 716 goto out; 717 } 718 dout(" oldest snapc is %p seq %lld (%d snaps)\n", 719 snapc, snapc->seq, snapc->num_snaps); 720 if (last_snapc && snapc != last_snapc) { 721 /* if we switched to a newer snapc, restart our scan at the 722 * start of the original file range. */ 723 dout(" snapc differs from last pass, restarting at %lu\n", 724 index); 725 index = start; 726 } 727 last_snapc = snapc; 728 729 while (!done && index <= end) { 730 unsigned i; 731 int first; 732 pgoff_t next; 733 int pvec_pages, locked_pages; 734 struct page *page; 735 int want; 736 u64 offset, len; 737 long writeback_stat; 738 739 next = 0; 740 locked_pages = 0; 741 max_pages = max_pages_ever; 742 743 get_more_pages: 744 first = -1; 745 want = min(end - index, 746 min((pgoff_t)PAGEVEC_SIZE, 747 max_pages - (pgoff_t)locked_pages) - 1) 748 + 1; 749 pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index, 750 PAGECACHE_TAG_DIRTY, 751 want); 752 dout("pagevec_lookup_tag got %d\n", pvec_pages); 753 if (!pvec_pages && !locked_pages) 754 break; 755 for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { 756 page = pvec.pages[i]; 757 dout("? %p idx %lu\n", page, page->index); 758 if (locked_pages == 0) 759 lock_page(page); /* first page */ 760 else if (!trylock_page(page)) 761 break; 762 763 /* only dirty pages, or our accounting breaks */ 764 if (unlikely(!PageDirty(page)) || 765 unlikely(page->mapping != mapping)) { 766 dout("!dirty or !mapping %p\n", page); 767 unlock_page(page); 768 break; 769 } 770 if (!wbc->range_cyclic && page->index > end) { 771 dout("end of range %p\n", page); 772 done = 1; 773 unlock_page(page); 774 break; 775 } 776 if (next && (page->index != next)) { 777 dout("not consecutive %p\n", page); 778 unlock_page(page); 779 break; 780 } 781 if (wbc->sync_mode != WB_SYNC_NONE) { 782 dout("waiting on writeback %p\n", page); 783 wait_on_page_writeback(page); 784 } 785 if ((snap_size && page_offset(page) > snap_size) || 786 (!snap_size && 787 page_offset(page) > i_size_read(inode))) { 788 dout("%p page eof %llu\n", page, snap_size ? 789 snap_size : i_size_read(inode)); 790 done = 1; 791 unlock_page(page); 792 break; 793 } 794 if (PageWriteback(page)) { 795 dout("%p under writeback\n", page); 796 unlock_page(page); 797 break; 798 } 799 800 /* only if matching snap context */ 801 pgsnapc = page_snap_context(page); 802 if (pgsnapc->seq > snapc->seq) { 803 dout("page snapc %p %lld > oldest %p %lld\n", 804 pgsnapc, pgsnapc->seq, snapc, snapc->seq); 805 unlock_page(page); 806 if (!locked_pages) 807 continue; /* keep looking for snap */ 808 break; 809 } 810 811 if (!clear_page_dirty_for_io(page)) { 812 dout("%p !clear_page_dirty_for_io\n", page); 813 unlock_page(page); 814 break; 815 } 816 817 /* ok */ 818 if (locked_pages == 0) { 819 /* prepare async write request */ 820 offset = (u64) page_offset(page); 821 len = wsize; 822 req = ceph_osdc_new_request(&fsc->client->osdc, 823 &ci->i_layout, 824 ceph_vino(inode), 825 offset, &len, 826 CEPH_OSD_OP_WRITE, 827 CEPH_OSD_FLAG_WRITE | 828 CEPH_OSD_FLAG_ONDISK, 829 snapc, do_sync, 830 ci->i_truncate_seq, 831 ci->i_truncate_size, 832 &inode->i_mtime, true); 833 834 if (IS_ERR(req)) { 835 rc = PTR_ERR(req); 836 unlock_page(page); 837 break; 838 } 839 840 req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; 841 req->r_data_out.num_pages = 842 calc_pages_for(0, len); 843 req->r_data_out.alignment = 0; 844 max_pages = req->r_data_out.num_pages; 845 846 alloc_page_vec(fsc, req); 847 req->r_callback = writepages_finish; 848 req->r_inode = inode; 849 } 850 851 /* note position of first page in pvec */ 852 if (first < 0) 853 first = i; 854 dout("%p will write page %p idx %lu\n", 855 inode, page, page->index); 856 857 writeback_stat = 858 atomic_long_inc_return(&fsc->writeback_count); 859 if (writeback_stat > CONGESTION_ON_THRESH( 860 fsc->mount_options->congestion_kb)) { 861 set_bdi_congested(&fsc->backing_dev_info, 862 BLK_RW_ASYNC); 863 } 864 865 set_page_writeback(page); 866 req->r_data_out.pages[locked_pages] = page; 867 locked_pages++; 868 next = page->index + 1; 869 } 870 871 /* did we get anything? */ 872 if (!locked_pages) 873 goto release_pvec_pages; 874 if (i) { 875 int j; 876 BUG_ON(!locked_pages || first < 0); 877 878 if (pvec_pages && i == pvec_pages && 879 locked_pages < max_pages) { 880 dout("reached end pvec, trying for more\n"); 881 pagevec_reinit(&pvec); 882 goto get_more_pages; 883 } 884 885 /* shift unused pages over in the pvec... we 886 * will need to release them below. */ 887 for (j = i; j < pvec_pages; j++) { 888 dout(" pvec leftover page %p\n", 889 pvec.pages[j]); 890 pvec.pages[j-i+first] = pvec.pages[j]; 891 } 892 pvec.nr -= i-first; 893 } 894 895 /* submit the write */ 896 offset = req->r_data_out.pages[0]->index << PAGE_CACHE_SHIFT; 897 len = min((snap_size ? snap_size : i_size_read(inode)) - offset, 898 (u64)locked_pages << PAGE_CACHE_SHIFT); 899 dout("writepages got %d pages at %llu~%llu\n", 900 locked_pages, offset, len); 901 902 /* revise final length, page count */ 903 req->r_data_out.num_pages = locked_pages; 904 req->r_request_ops[0].extent.length = cpu_to_le64(len); 905 req->r_request_ops[0].payload_len = cpu_to_le32(len); 906 req->r_request->hdr.data_len = cpu_to_le32(len); 907 908 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 909 BUG_ON(rc); 910 req = NULL; 911 912 /* continue? */ 913 index = next; 914 wbc->nr_to_write -= locked_pages; 915 if (wbc->nr_to_write <= 0) 916 done = 1; 917 918 release_pvec_pages: 919 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, 920 pvec.nr ? pvec.pages[0] : NULL); 921 pagevec_release(&pvec); 922 923 if (locked_pages && !done) 924 goto retry; 925 } 926 927 if (should_loop && !done) { 928 /* more to do; loop back to beginning of file */ 929 dout("writepages looping back to beginning of file\n"); 930 should_loop = 0; 931 index = 0; 932 goto retry; 933 } 934 935 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 936 mapping->writeback_index = index; 937 938 out: 939 if (req) 940 ceph_osdc_put_request(req); 941 ceph_put_snap_context(snapc); 942 dout("writepages done, rc = %d\n", rc); 943 return rc; 944 } 945 946 947 948 /* 949 * See if a given @snapc is either writeable, or already written. 950 */ 951 static int context_is_writeable_or_written(struct inode *inode, 952 struct ceph_snap_context *snapc) 953 { 954 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); 955 int ret = !oldest || snapc->seq <= oldest->seq; 956 957 ceph_put_snap_context(oldest); 958 return ret; 959 } 960 961 /* 962 * We are only allowed to write into/dirty the page if the page is 963 * clean, or already dirty within the same snap context. 964 * 965 * called with page locked. 966 * return success with page locked, 967 * or any failure (incl -EAGAIN) with page unlocked. 968 */ 969 static int ceph_update_writeable_page(struct file *file, 970 loff_t pos, unsigned len, 971 struct page *page) 972 { 973 struct inode *inode = file_inode(file); 974 struct ceph_inode_info *ci = ceph_inode(inode); 975 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 976 loff_t page_off = pos & PAGE_CACHE_MASK; 977 int pos_in_page = pos & ~PAGE_CACHE_MASK; 978 int end_in_page = pos_in_page + len; 979 loff_t i_size; 980 int r; 981 struct ceph_snap_context *snapc, *oldest; 982 983 retry_locked: 984 /* writepages currently holds page lock, but if we change that later, */ 985 wait_on_page_writeback(page); 986 987 /* check snap context */ 988 BUG_ON(!ci->i_snap_realm); 989 down_read(&mdsc->snap_rwsem); 990 BUG_ON(!ci->i_snap_realm->cached_context); 991 snapc = page_snap_context(page); 992 if (snapc && snapc != ci->i_head_snapc) { 993 /* 994 * this page is already dirty in another (older) snap 995 * context! is it writeable now? 996 */ 997 oldest = get_oldest_context(inode, NULL); 998 up_read(&mdsc->snap_rwsem); 999 1000 if (snapc->seq > oldest->seq) { 1001 ceph_put_snap_context(oldest); 1002 dout(" page %p snapc %p not current or oldest\n", 1003 page, snapc); 1004 /* 1005 * queue for writeback, and wait for snapc to 1006 * be writeable or written 1007 */ 1008 snapc = ceph_get_snap_context(snapc); 1009 unlock_page(page); 1010 ceph_queue_writeback(inode); 1011 r = wait_event_interruptible(ci->i_cap_wq, 1012 context_is_writeable_or_written(inode, snapc)); 1013 ceph_put_snap_context(snapc); 1014 if (r == -ERESTARTSYS) 1015 return r; 1016 return -EAGAIN; 1017 } 1018 ceph_put_snap_context(oldest); 1019 1020 /* yay, writeable, do it now (without dropping page lock) */ 1021 dout(" page %p snapc %p not current, but oldest\n", 1022 page, snapc); 1023 if (!clear_page_dirty_for_io(page)) 1024 goto retry_locked; 1025 r = writepage_nounlock(page, NULL); 1026 if (r < 0) 1027 goto fail_nosnap; 1028 goto retry_locked; 1029 } 1030 1031 if (PageUptodate(page)) { 1032 dout(" page %p already uptodate\n", page); 1033 return 0; 1034 } 1035 1036 /* full page? */ 1037 if (pos_in_page == 0 && len == PAGE_CACHE_SIZE) 1038 return 0; 1039 1040 /* past end of file? */ 1041 i_size = inode->i_size; /* caller holds i_mutex */ 1042 1043 if (i_size + len > inode->i_sb->s_maxbytes) { 1044 /* file is too big */ 1045 r = -EINVAL; 1046 goto fail; 1047 } 1048 1049 if (page_off >= i_size || 1050 (pos_in_page == 0 && (pos+len) >= i_size && 1051 end_in_page - pos_in_page != PAGE_CACHE_SIZE)) { 1052 dout(" zeroing %p 0 - %d and %d - %d\n", 1053 page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); 1054 zero_user_segments(page, 1055 0, pos_in_page, 1056 end_in_page, PAGE_CACHE_SIZE); 1057 return 0; 1058 } 1059 1060 /* we need to read it. */ 1061 up_read(&mdsc->snap_rwsem); 1062 r = readpage_nounlock(file, page); 1063 if (r < 0) 1064 goto fail_nosnap; 1065 goto retry_locked; 1066 1067 fail: 1068 up_read(&mdsc->snap_rwsem); 1069 fail_nosnap: 1070 unlock_page(page); 1071 return r; 1072 } 1073 1074 /* 1075 * We are only allowed to write into/dirty the page if the page is 1076 * clean, or already dirty within the same snap context. 1077 */ 1078 static int ceph_write_begin(struct file *file, struct address_space *mapping, 1079 loff_t pos, unsigned len, unsigned flags, 1080 struct page **pagep, void **fsdata) 1081 { 1082 struct inode *inode = file_inode(file); 1083 struct page *page; 1084 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1085 int r; 1086 1087 do { 1088 /* get a page */ 1089 page = grab_cache_page_write_begin(mapping, index, 0); 1090 if (!page) 1091 return -ENOMEM; 1092 *pagep = page; 1093 1094 dout("write_begin file %p inode %p page %p %d~%d\n", file, 1095 inode, page, (int)pos, (int)len); 1096 1097 r = ceph_update_writeable_page(file, pos, len, page); 1098 } while (r == -EAGAIN); 1099 1100 return r; 1101 } 1102 1103 /* 1104 * we don't do anything in here that simple_write_end doesn't do 1105 * except adjust dirty page accounting and drop read lock on 1106 * mdsc->snap_rwsem. 1107 */ 1108 static int ceph_write_end(struct file *file, struct address_space *mapping, 1109 loff_t pos, unsigned len, unsigned copied, 1110 struct page *page, void *fsdata) 1111 { 1112 struct inode *inode = file_inode(file); 1113 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1114 struct ceph_mds_client *mdsc = fsc->mdsc; 1115 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1116 int check_cap = 0; 1117 1118 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, 1119 inode, page, (int)pos, (int)copied, (int)len); 1120 1121 /* zero the stale part of the page if we did a short copy */ 1122 if (copied < len) 1123 zero_user_segment(page, from+copied, len); 1124 1125 /* did file size increase? */ 1126 /* (no need for i_size_read(); we caller holds i_mutex */ 1127 if (pos+copied > inode->i_size) 1128 check_cap = ceph_inode_set_size(inode, pos+copied); 1129 1130 if (!PageUptodate(page)) 1131 SetPageUptodate(page); 1132 1133 set_page_dirty(page); 1134 1135 unlock_page(page); 1136 up_read(&mdsc->snap_rwsem); 1137 page_cache_release(page); 1138 1139 if (check_cap) 1140 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 1141 1142 return copied; 1143 } 1144 1145 /* 1146 * we set .direct_IO to indicate direct io is supported, but since we 1147 * intercept O_DIRECT reads and writes early, this function should 1148 * never get called. 1149 */ 1150 static ssize_t ceph_direct_io(int rw, struct kiocb *iocb, 1151 const struct iovec *iov, 1152 loff_t pos, unsigned long nr_segs) 1153 { 1154 WARN_ON(1); 1155 return -EINVAL; 1156 } 1157 1158 const struct address_space_operations ceph_aops = { 1159 .readpage = ceph_readpage, 1160 .readpages = ceph_readpages, 1161 .writepage = ceph_writepage, 1162 .writepages = ceph_writepages_start, 1163 .write_begin = ceph_write_begin, 1164 .write_end = ceph_write_end, 1165 .set_page_dirty = ceph_set_page_dirty, 1166 .invalidatepage = ceph_invalidatepage, 1167 .releasepage = ceph_releasepage, 1168 .direct_IO = ceph_direct_io, 1169 }; 1170 1171 1172 /* 1173 * vm ops 1174 */ 1175 1176 /* 1177 * Reuse write_begin here for simplicity. 1178 */ 1179 static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 1180 { 1181 struct inode *inode = file_inode(vma->vm_file); 1182 struct page *page = vmf->page; 1183 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 1184 loff_t off = page_offset(page); 1185 loff_t size, len; 1186 int ret; 1187 1188 /* Update time before taking page lock */ 1189 file_update_time(vma->vm_file); 1190 1191 size = i_size_read(inode); 1192 if (off + PAGE_CACHE_SIZE <= size) 1193 len = PAGE_CACHE_SIZE; 1194 else 1195 len = size & ~PAGE_CACHE_MASK; 1196 1197 dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode, 1198 off, len, page, page->index); 1199 1200 lock_page(page); 1201 1202 ret = VM_FAULT_NOPAGE; 1203 if ((off > size) || 1204 (page->mapping != inode->i_mapping)) 1205 goto out; 1206 1207 ret = ceph_update_writeable_page(vma->vm_file, off, len, page); 1208 if (ret == 0) { 1209 /* success. we'll keep the page locked. */ 1210 set_page_dirty(page); 1211 up_read(&mdsc->snap_rwsem); 1212 ret = VM_FAULT_LOCKED; 1213 } else { 1214 if (ret == -ENOMEM) 1215 ret = VM_FAULT_OOM; 1216 else 1217 ret = VM_FAULT_SIGBUS; 1218 } 1219 out: 1220 dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret); 1221 if (ret != VM_FAULT_LOCKED) 1222 unlock_page(page); 1223 return ret; 1224 } 1225 1226 static struct vm_operations_struct ceph_vmops = { 1227 .fault = filemap_fault, 1228 .page_mkwrite = ceph_page_mkwrite, 1229 .remap_pages = generic_file_remap_pages, 1230 }; 1231 1232 int ceph_mmap(struct file *file, struct vm_area_struct *vma) 1233 { 1234 struct address_space *mapping = file->f_mapping; 1235 1236 if (!mapping->a_ops->readpage) 1237 return -ENOEXEC; 1238 file_accessed(file); 1239 vma->vm_ops = &ceph_vmops; 1240 return 0; 1241 } 1242