/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"


static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int len)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int to = from + len;
        unsigned int start, end;

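        /*
         * Walk the page's circular list of buffers exactly once (we are
         * back at the head with a non-zero start once we have wrapped) and
         * add each buffer that overlaps [from, from + len) to the current
         * transaction.  Jdata buffers are marked uptodate before being
         * added.
         */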
        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from)
                        continue;
                if (start >= to)
                        break;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -EIO;
        return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page can be written, 0 if it has been dealt with
 *          (and unlocked) here, or an error code.
 */

static int gfs2_writepage_common(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
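        /*
         * If this task already has a transaction open, writing the page
         * now would risk recursing into the log code, so leave it dirty
         * for a later writeback pass.
         */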
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
                goto out;
        }
        return 1;
redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc)
{
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;

        /*
         * The page straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        offset = i_size & (PAGE_SIZE-1);
        if (page->index == end_index && offset)
                zero_user_segment(page, offset, PAGE_SIZE);

        return __block_write_full_page(inode, page, get_block, wbc,
                                       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             BIT(BH_Dirty)|BIT(BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
        }
        return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int ret;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
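        /*
         * A PageChecked page must be written under a transaction, which
         * writepage has not started (writepages starts one before calling
         * __gfs2_jdata_writepage); and we cannot start one here if this
         * task already holds one.  Either way, redirty the page and let a
         * later pass deal with it.
         */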
        if (PageChecked(page) || current->journal_info)
                goto out_ignore;
        ret = __gfs2_jdata_writepage(page, wbc);
        return ret;

out_ignore:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

        /*
         * Even if we didn't write any pages here, we might still be holding
         * dirty pages in the ail. We forcibly flush the ail because we don't
         * want balance_dirty_pages() to loop indefinitely trying to write out
         * pages held in the ail that it can't find.
         */
        if (ret == 0)
                set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

        return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages,
                                    pgoff_t *done_index)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
        int i;
        int ret;

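        /*
         * Reserve journal space for the worst case up front: every block
         * of every page in the pagevec may end up being journaled.
         */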
        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for(i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                *done_index = page->index;

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
continue_unlock:
                        unlock_page(page);
                        continue;
                }

                if (!PageDirty(page)) {
                        /* someone wrote it for us */
                        goto continue_unlock;
                }

                if (PageWriteback(page)) {
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        else
                                goto continue_unlock;
                }

                BUG_ON(PageWriteback(page));
                if (!clear_page_dirty_for_io(page))
                        goto continue_unlock;

                trace_wbc_writepage(wbc, inode_to_bdi(inode));

                ret = __gfs2_jdata_writepage(page, wbc);
                if (unlikely(ret)) {
                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                unlock_page(page);
                                ret = 0;
                        } else {

                                /*
                                 * done_index is set past this page,
                                 * so media errors will not choke
                                 * background writeout for the entire
                                 * file. This has consequences for
                                 * range_cyclic semantics (ie. it may
                                 * not be suitable for data integrity
                                 * writeout).
                                 */
                                *done_index = page->index + 1;
                                ret = 1;
                                break;
                        }
                }

                /*
                 * We stop writing back only if we are not doing
                 * integrity sync. In case of integrity sync we have to
                 * keep going until we have written all the pages
                 * we tagged for writeback prior to entering this loop.
                 */
                if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
                        ret = 1;
                        break;
                }

        }
        gfs2_trans_end(sdp);
        return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        int tag;

        pagevec_init(&pvec);
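        /*
         * The loop below mirrors write_cache_pages(): for range_cyclic
         * writeback we resume from where the previous pass left off, and
         * "cycled" tracks whether we have already wrapped around to the
         * start of the file.
         */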
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;

retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                                    tag);
                if (nr_pages == 0)
                        break;

                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;
                pagevec_release(&pvec);
                cond_resched();
        }

        if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

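        /*
         * The stuffed data lives in the dinode block, immediately after
         * the on-disk inode header; copy it into the page and zero the
         * rest.
         */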
        kaddr = kmap_atomic(page);
        if (dsize > gfs2_max_stuffed_size(ip))
                dsize = gfs2_max_stuffed_size(ip);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int error;

        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder gh;
        int error;

        unlock_page(page);
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (unlikely(error))
                goto out;
        error = AOP_TRUNCATED_PAGE;
        lock_page(page);
        if (page->mapping == mapping && !PageUptodate(page))
                error = __gfs2_readpage(file, page);
        else
                unlock_page(page);
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (error && error != AOP_TRUNCATED_PAGE)
                lock_page(page);
        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_SIZE;
        unsigned offset = *pos & (PAGE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

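        /*
         * Copy the data out one page at a time, clamping amt so that a
         * single copy never crosses the end of the current page; from the
         * second page onwards the offset is always zero.
         */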
        do {
                amt = size - copied;
                if (offset + amt > PAGE_SIZE)
                        amt = PAGE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p);
                put_page(page);
                copied += amt;
                index++;
                offset = 0;
        } while(copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything which
 *    is slightly inconvenient (such as locking conflicts between the page
 *    lock and the glock) and return having done no I/O. It's obviously not
 *    something we'd want to do on too regular a basis. Any I/O we ignore
 *    at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        unsigned requested = 0;
        int alloc_required;
        int error = 0;
        pgoff_t index = pos >> PAGE_SHIFT;
        unsigned from = pos & (PAGE_SIZE - 1);
        struct page *page;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
        error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;
        if (&ip->i_inode == sdp->sd_rindex) {
                error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &m_ip->i_gh);
                if (unlikely(error)) {
                        gfs2_glock_dq(&ip->i_gh);
                        goto out_uninit;
                }
        }

        alloc_required = gfs2_write_alloc_required(ip, pos, len);

        if (alloc_required || gfs2_is_jdata(ip))
                gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

        if (alloc_required) {
                struct gfs2_alloc_parms ap = { .aflags = 0, };
                requested = data_blocks + ind_blocks;
                ap.target = requested;
                error = gfs2_quota_lock_check(ip, &ap);
                if (error)
                        goto out_unlock;

                error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_qunlock;
        }

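        /*
         * Size the transaction: the dinode, any indirect blocks, the data
         * blocks themselves when journaling data, plus statfs/quota
         * changes and resource group bitmaps when allocating.
         */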
        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;
        if (&ip->i_inode == sdp->sd_rindex)
                rblocks += 2 * RES_STATFS;
        if (alloc_required)
                rblocks += gfs2_rg_blocks(ip, requested);

        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;

        error = -ENOMEM;
        flags |= AOP_FLAG_NOFS;
        page = grab_cache_page_write_begin(mapping, index, flags);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;

        if (gfs2_is_stuffed(ip)) {
                error = 0;
                if (pos + len > gfs2_max_stuffed_size(ip)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page)) {
                        error = stuffed_readpage(ip, page);
                }
                goto out;
        }

prepare_write:
        error = __block_write_begin(page, from, len, gfs2_block_map);
out:
        if (error == 0)
                return 0;

        unlock_page(page);
        put_page(page);

        gfs2_trans_end(sdp);
        if (pos + len > ip->i_inode.i_size)
                gfs2_trim_blocks(&ip->i_inode);
        goto out_trans_fail;

out_endtrans:
        gfs2_trans_end(sdp);
out_trans_fail:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_qunlock:
                gfs2_quota_unlock(ip);
        }
out_unlock:
        if (&ip->i_inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                return;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
                goto out;
        update_statfs(sdp, m_bh, l_bh);
        brelse(l_bh);
out:
        brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        u64 to = pos + copied;
        void *kaddr;
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

        BUG_ON(pos + len > gfs2_max_stuffed_size(ip));

        kaddr = kmap_atomic(page);
        memcpy(buf + pos, kaddr + pos, copied);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        WARN_ON(!PageUptodate(page));
        unlock_page(page);
        put_page(page);

        if (copied) {
                if (inode->i_size < to)
                        i_size_write(inode, to);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct buffer_head *dibh;
        int ret;
        struct gfs2_trans *tr = current->journal_info;
        BUG_ON(!tr);

        BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
                put_page(page);
                goto failed;
        }

        if (gfs2_is_stuffed(ip))
                return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

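        /*
         * In ordered and jdata mode, the buffers backing this write must
         * be added to the transaction (journaled, or tracked for ordered
         * writeout) before generic_write_end() dirties the page.
         */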
        if (!gfs2_is_writeback(ip))
                gfs2_page_add_databufs(ip, page, pos & ~PAGE_MASK, len);

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (tr->tr_num_buf_new)
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        else
                gfs2_trans_add_meta(ip->i_gl, dibh);


        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
failed:
        gfs2_trans_end(sdp);
        gfs2_inplace_release(ip);
        if (ip->i_qadata && ip->i_qadata->qa_qd_num)
                gfs2_quota_unlock(ip);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
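        /*
         * PageChecked tells the jdata writepage code that this page still
         * needs a transaction started for it; see __gfs2_jdata_writepage().
         */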
        SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else
                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int stop = offset + length;
        int partial_page = (offset || length < PAGE_SIZE);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (!partial_page)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a stuffed file makes any sense. For now we'll silently fall
         * back to buffered I/O.
         */
        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset >= i_size_read(&ip->i_inode))
                return 0;
        return 1;
}



static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct address_space *mapping = inode->i_mapping;
        struct gfs2_inode *ip = GFS2_I(inode);
        loff_t offset = iocb->ki_pos;
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation on
         * this path. All we need to change is atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like the
         * VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
        rv = gfs2_glock_nq(&gh);
        if (rv)
                goto out_uninit;
        rv = gfs2_ok_for_dio(ip, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        /*
         * Now since we are holding a deferred (CW) lock at this point, you
         * might be wondering why this is ever needed. There is a case however
         * where we've granted a deferred local lock against a cached exclusive
         * glock. That is ok provided all granted local locks are deferred, but
         * it also means that it is possible to encounter pages which are
         * cached and possibly also mapped. So here we check for that and sort
         * them out ahead of the dio. The glock state machine will take care of
         * everything else.
         *
         * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
         * the first place, mapping->nrpages will always be zero.
         */
        if (mapping->nrpages) {
                loff_t lstart = offset & ~(PAGE_SIZE - 1);
                loff_t len = iov_iter_count(iter);
                loff_t end = PAGE_ALIGN(offset + len) - 1;

                rv = 0;
                if (len == 0)
                        goto out;
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
                rv = filemap_write_and_wait_range(mapping, lstart, end);
                if (rv)
                        goto out;
                if (iov_iter_rw(iter) == WRITE)
                        truncate_inode_pages_range(mapping, lstart, end);
        }

        rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
                                  gfs2_get_block_direct, NULL, NULL, 0);
out:
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        /*
         * From xfs_vm_releasepage: mm accommodates an old ext3 case where
         * clean pages might not have had the dirty bit cleared. Thus, it can
         * send actual dirty pages to ->releasepage() via shrink_active_list().
         *
         * As a workaround, we skip pages that contain dirty buffers below.
         * Once ->releasepage isn't called on dirty pages anymore, we can warn
         * on dirty buffers like we used to here again.
         */

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
                        goto cannot_release;
                bh = bh->b_this_page;
        } while(bh != head);
        spin_unlock(&sdp->sd_ail_lock);

        head = bh = page_buffers(page);
        do {
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        if (!list_empty(&bd->bd_list))
                                list_del_init(&bd->bd_list);
                        bd->bd_bh = NULL;
                        bh->b_private = NULL;
                        kmem_cache_free(gfs2_bufdata_cachep, bd);
                }

                bh = bh->b_this_page;
        } while (bh != head);
        gfs2_log_unlock(sdp);

        return try_to_free_buffers(page);

cannot_release:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
}

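/*
 * One set of address_space operations per data journaling mode.  They
 * differ mainly in the writepage paths; note that only the ordered and
 * jdata tables install ->set_page_dirty (to set PageChecked), and that
 * the jdata table has no ->direct_IO or ->migratepage.
 */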
static const struct address_space_operations gfs2_writeback_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_writeback(ip))
                inode->i_mapping->a_ops = &gfs2_writeback_aops;
        else if (gfs2_is_ordered(ip))
                inode->i_mapping->a_ops = &gfs2_ordered_aops;
        else if (gfs2_is_jdata(ip))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                BUG();
}