#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/osd_client.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page. This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode. In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress. In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER. So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_. Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
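
/*
 * Worked example (assuming the common 4 KB page size, PAGE_SHIFT == 12):
 * congestion_kb is in kilobytes, so shifting right by PAGE_SHIFT - 10 == 2
 * converts it to a page count.  With, say, congestion_kb == 8192, the bdi
 * is marked congested once 2048 pages are under writeback, and the flag is
 * cleared again when the count drops below the off threshold of 1536
 * (i.e. 3/4 of the on threshold).
 */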

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page. Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate. If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	int undo = 0;
	struct ceph_snap_context *snapc;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/*
	 * Note that we're grabbing a snapc ref here without holding
	 * any locks!
	 */
	snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_head_snapc == NULL)
		ci->i_head_snapc = ceph_get_snap_context(snapc);
	++ci->i_wrbuffer_ref_head;
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/* now adjust page */
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(!PageUptodate(page));
		account_page_dirtied(page, page->mapping);
		radix_tree_tag_set(&mapping->page_tree,
				   page_index(page), PAGECACHE_TAG_DIRTY);

		/*
		 * Reference snap context in page->private. Also set
		 * PagePrivate so that we get invalidatepage callback.
		 */
		page->private = (unsigned long)snapc;
		SetPagePrivate(page);
	} else {
		dout("ANON set_page_dirty %p (raced truncate?)\n", page);
		undo = 1;
	}

	spin_unlock_irq(&mapping->tree_lock);

	if (undo)
		/* whoops, we failed to dirty the page */
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	BUG_ON(!PageDirty(page));
	return 1;
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately. Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	BUG_ON(!PageLocked(page));
	BUG_ON(!PagePrivate(page));
	BUG_ON(!page->mapping);

	inode = page->mapping->host;

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	if (offset == 0)
		ClearPageChecked(page);

	ci = ceph_inode(inode);
	if (offset == 0) {
		dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
		     inode, page, page->index, offset);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
		page->private = 0;
		ClearPagePrivate(page);
	} else {
		dout("%p invalidatepage %p idx %lu partial dirty page\n",
		     inode, page, page->index);
	}
}

/* just a sanity check */
static int ceph_releasepage(struct page *page, gfp_t g)
{
	struct inode *inode = page->mapping ? page->mapping->host : NULL;
	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));
	WARN_ON(PagePrivate(page));
	return 0;
}

/*
 * read a single page, without unlocking it.
 */
static int readpage_nounlock(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 len = PAGE_CACHE_SIZE;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  (u64) page_offset(page), &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		goto out;
	} else if (err < PAGE_CACHE_SIZE) {
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_CACHE_SIZE);
	}
	SetPageUptodate(page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = readpage_nounlock(filp, page);
	unlock_page(page);
	return r;
}

/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_data *osd_data;
	int rc = req->r_result;
	int bytes = le32_to_cpu(msg->hdr.data_len);
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (bytes < (int)PAGE_CACHE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_CACHE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);
		bytes -= PAGE_CACHE_SIZE;
	}
	kfree(osd_data->pages);
}

static void ceph_unlock_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		unlock_page(pages[i]);
}

/*
 * start an async read(ahead) operation. return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int ret;

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_CACHE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
	ret = -ENOMEM;
	if (!pages)
		goto out;
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_NOFS)) {
			page_cache_release(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);
	return nr_pages;

out_pages:
	ceph_unlock_page_vector(pages, nr_pages);
	ceph_release_page_vector(pages, nr_pages);
out:
	ceph_osdc_put_request(req);
	return ret;
}


/*
 * Read multiple pages. Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int rc = 0;
	int max = 0;

	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
			>> PAGE_SHIFT;

	dout("readpages %p file %p nr_pages %d max %d\n", inode,
	     file, nr_pages,
	     max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, page_list, max);
		if (rc < 0)
			goto out;
		BUG_ON(rc == 0);
	}
out:
	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    u64 *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
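
/*
 * Illustration (hypothetical numbers): if i_cap_snaps holds capsnaps whose
 * contexts have seq 3 and seq 5, plus dirty pages in the head context, this
 * returns the seq-3 context until its dirty_pages count drains to zero, then
 * the seq-5 context, and only then i_head_snapc.  That is what enforces the
 * "writeback in snap order" rule described at the top of this file.
 */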

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_osd_client *osdc;
	loff_t page_off = page_offset(page);
	int len = PAGE_CACHE_SIZE;
	loff_t i_size;
	int err = 0;
	struct ceph_snap_context *snapc, *oldest;
	u64 snap_size = 0;
	long writeback_stat;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);
	osdc = &fsc->client->osdc;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
		goto out;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (snap_size)
		i_size = snap_size;
	else
		i_size = i_size_read(inode);
	if (i_size < page_off + len)
		len = i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
	     inode, page, page->index, page_off, len, snapc);

	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
	if (writeback_stat >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   ci->i_truncate_seq, ci->i_truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		dout("writepage setting page/mapping error %d %p\n", err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		if (wbc)
			wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;	/* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */
out:
	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	unlock_page(page);
	iput(inode);
	return err;
}


/*
 * lame release_pages helper. release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}


/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	unsigned wrote;
	struct page *page;
	int num_pages;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	int rc = req->r_result;
	u64 bytes = req->r_ops[0].extent.length;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	long writeback_stat;
	unsigned issued = ceph_caps_issued(ci);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	if (rc >= 0) {
		/*
		 * Assume we wrote the pages we originally sent. The
		 * osd might reply with fewer pages if our writeback
		 * raced with a truncation and was adjusted at the osd,
		 * so don't believe the reply.
		 */
		wrote = num_pages;
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < num_pages; i++) {
		page = osd_data->pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

		writeback_stat =
			atomic_long_dec_return(&fsc->writeback_count);
		if (writeback_stat <
		    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
			clear_bdi_congested(&fsc->backing_dev_info,
					    BLK_RW_ASYNC);

		ceph_put_snap_context(page_snap_context(page));
		page->private = 0;
		ClearPagePrivate(page);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);

		/*
		 * We lost the cache cap, need to truncate the page before
		 * it is unlocked, otherwise we'd truncate it later in the
		 * page truncation thread, possibly losing some data that
		 * raced its way in
		 */
		if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
			generic_error_remove_page(inode->i_mapping, page);

		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc);

	ceph_release_pages(osd_data->pages, num_pages);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

static struct ceph_osd_request *
ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len,
			    struct ceph_snap_context *snapc, int num_ops)
{
	struct ceph_fs_client *fsc;
	struct ceph_inode_info *ci;
	struct ceph_vino vino;

	fsc = ceph_inode_to_client(inode);
	ci = ceph_inode(inode);
	vino = ceph_vino(inode);
	/* BUG_ON(vino.snap != CEPH_NOSNAP); */

	return ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
			vino, offset, len, num_ops, CEPH_OSD_OP_WRITE,
			CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK,
			snapc, ci->i_truncate_seq, ci->i_truncate_size, true);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc;
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync;
	u64 snap_size;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	do_sync = wbc->sync_mode == WB_SYNC_ALL;
	if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	fsc = ceph_inode_to_client(inode);
	if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
		pr_warning("writepage_start %p on forced umount\n", inode);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snap_size = 0;
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	if (snap_size == 0)
		snap_size = i_size_read(inode);
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);
	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout(" snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = do_sync ? 2 : 1;
		struct ceph_vino vino;
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page **pages = NULL;
		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
		struct page *page;
		int want;
		u64 offset, len;
		long writeback_stat;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if (page_offset(page) >= snap_size) {
				dout("%p page eof %llu\n", page, snap_size);
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc->seq > snapc->seq) {
				dout("page snapc %p %lld > oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/*
			 * We have something to write. If this is
			 * the first locked page this time through,
			 * allocate an osd request and a page array
			 * that it will use.
			 */
			if (locked_pages == 0) {
				size_t size;

				BUG_ON(pages);

				/* prepare async write request */
				offset = (u64)page_offset(page);
				len = wsize;
				req = ceph_writepages_osd_request(inode,
							offset, &len, snapc,
							num_ops);

				if (IS_ERR(req)) {
					rc = PTR_ERR(req);
					unlock_page(page);
					break;
				}

				req->r_callback = writepages_finish;
				req->r_inode = inode;

				max_pages = calc_pages_for(0, (u64)len);
				size = max_pages * sizeof (*pages);
				pages = kmalloc(size, GFP_NOFS);
				if (!pages) {
					pool = fsc->wb_pagevec_pool;
					pages = mempool_alloc(pool, GFP_NOFS);
					BUG_ON(!pages);
				}
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			writeback_stat =
				atomic_long_inc_return(&fsc->writeback_count);
			if (writeback_stat > CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(&fsc->backing_dev_info,
						  BLK_RW_ASYNC);
			}

			set_page_writeback(page);
			pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec... we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* Format the osd request message and submit the write */

		offset = page_offset(pages[0]);
		len = min(snap_size - offset,
			  (u64)locked_pages << PAGE_CACHE_SHIFT);
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 !!pool, false);

		pages = NULL;	/* request message now owns the pages array */
		pool = NULL;

		/* Update the write op length in case we changed it */

		osd_req_op_extent_update(req, 0, len);

		vino = ceph_vino(inode);
		ceph_osdc_build_request(req, offset, snapc, vino.snap,
					&inode->i_mtime);

		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}



/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t page_off = pos & PAGE_CACHE_MASK;
	int pos_in_page = pos & ~PAGE_CACHE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

retry_locked:
	/* writepages currently keeps pages locked while they are under
	 * writeback; wait here in case that changes later. */
	wait_on_page_writeback(page);

	/* check snap context */
	BUG_ON(!ci->i_snap_realm);
	down_read(&mdsc->snap_rwsem);
	BUG_ON(!ci->i_snap_realm->cached_context);
	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context! is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL);
		up_read(&mdsc->snap_rwsem);

		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_interruptible(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_nosnap;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
		return 0;

	/* past end of file? */
	i_size = inode->i_size;   /* caller holds i_mutex */

	if (i_size + len > inode->i_sb->s_maxbytes) {
		/* file is too big */
		r = -EINVAL;
		goto fail;
	}

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_CACHE_SIZE);
		return 0;
	}

	/* we need to read it. */
	up_read(&mdsc->snap_rwsem);
	r = readpage_nounlock(file, page);
	if (r < 0)
		goto fail_nosnap;
	goto retry_locked;

fail:
	up_read(&mdsc->snap_rwsem);
fail_nosnap:
	unlock_page(page);
	return r;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;
		*pagep = page;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
	} while (r == -EAGAIN);

	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting and drop read lock on
 * mdsc->snap_rwsem.
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, len);

	/* did file size increase? */
	/* (no need for i_size_read(); the caller holds i_mutex) */
	if (pos+copied > inode->i_size)
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	up_read(&mdsc->snap_rwsem);
	page_cache_release(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
			      const struct iovec *iov,
			      loff_t pos, unsigned long nr_segs)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};


/*
 * vm ops
 */

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct page *page = vmf->page;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t off = page_offset(page);
	loff_t size, len;
	int ret;

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);

	size = i_size_read(inode);
	if (off + PAGE_CACHE_SIZE <= size)
		len = PAGE_CACHE_SIZE;
	else
		len = size & ~PAGE_CACHE_MASK;

	dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
	     off, len, page, page->index);

	lock_page(page);

	ret = VM_FAULT_NOPAGE;
	if ((off > size) ||
	    (page->mapping != inode->i_mapping))
		goto out;

	ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
	if (ret == 0) {
		/* success. we'll keep the page locked. */
		set_page_dirty(page);
		up_read(&mdsc->snap_rwsem);
		ret = VM_FAULT_LOCKED;
	} else {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else
			ret = VM_FAULT_SIGBUS;
	}
out:
	dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
	if (ret != VM_FAULT_LOCKED)
		unlock_page(page);
	return ret;
}

static struct vm_operations_struct ceph_vmops = {
	.fault = filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}