/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

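/**
 * gfs2_page_add_databufs - Add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page
 * @from: First byte of interest within the page
 * @to: End of the range of interest within the page
 *
 * Walks the buffer heads attached to @page and adds each buffer that
 * overlaps the given byte range to the running transaction with
 * gfs2_trans_add_data(). For jdata inodes each such buffer is also
 * marked uptodate first.
 */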
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -EIO;
        return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
                goto out;
        }
        return 1;
redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc)
{
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;

        /*
         * The page straddles i_size. It must be zeroed out on each and every
         * writepage invocation because it may be mmapped. "A file is mapped
         * in multiples of the page size. For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        offset = i_size & (PAGE_SIZE-1);
        if (page->index == end_index && offset)
                zero_user_segment(page, offset, PAGE_SIZE);

        return __block_write_full_page(inode, page, get_block, wbc,
                                       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             BIT(BH_Dirty)|BIT(BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
        }
        return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int ret;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (PageChecked(page) || current->journal_info)
                goto out_ignore;
        ret = __gfs2_jdata_writepage(page, wbc);
        return ret;

out_ignore:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

        /*
         * Even if we didn't write any pages here, we might still be holding
         * dirty pages in the ail. We forcibly flush the ail because we don't
         * want balance_dirty_pages() to loop indefinitely trying to write out
         * pages held in the ail that it can't find.
         */
        if (ret == 0)
                set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

        return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: End position
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages, pgoff_t end,
                                    pgoff_t *done_index)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
        int i;
        int ret;

        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for(i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                *done_index = page->index;

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
continue_unlock:
                        unlock_page(page);
                        continue;
                }

                if (!PageDirty(page)) {
                        /* someone wrote it for us */
                        goto continue_unlock;
                }

                if (PageWriteback(page)) {
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        else
                                goto continue_unlock;
                }

                BUG_ON(PageWriteback(page));
                if (!clear_page_dirty_for_io(page))
                        goto continue_unlock;

                trace_wbc_writepage(wbc, inode_to_bdi(inode));

                ret = __gfs2_jdata_writepage(page, wbc);
                if (unlikely(ret)) {
                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                unlock_page(page);
                                ret = 0;
                        } else {

                                /*
                                 * done_index is set past this page,
                                 * so media errors will not choke
                                 * background writeout for the entire
                                 * file. This has consequences for
                                 * range_cyclic semantics (ie. it may
                                 * not be suitable for data integrity
                                 * writeout).
                                 */
                                *done_index = page->index + 1;
                                ret = 1;
                                break;
                        }
                }

                /*
                 * We stop writing back only if we are not doing
                 * integrity sync. In case of integrity sync we have to
                 * keep going until we have written all the pages
                 * we tagged for writeback prior to entering this loop.
                 */
                if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
                        ret = 1;
                        break;
                }

        }
        gfs2_trans_end(sdp);
        return ret;
}

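/*
 * Note that gfs2_write_jdata_pagevec() above opens a single transaction
 * per pagevec, sized on the assumption that every block of every page in
 * the batch (PAGE_SIZE / block size blocks per page) may need to be
 * journalled, and the transaction is begun before any page lock is taken.
 */
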
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        int tag;

        pagevec_init(&pvec);
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;

retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                                    tag);
                if (nr_pages == 0)
                        break;

                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;
                pagevec_release(&pvec);
                cond_resched();
        }

        if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

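/*
 * A "stuffed" file is one small enough for its data to live in the inode's
 * own disk block, directly after the struct gfs2_dinode header, rather than
 * in separate data blocks. Only page index 0 of such a file can therefore
 * ever contain file data.
 */
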
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page);
        if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
                dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 *
 */

static int __gfs2_readpage(void *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int error;

        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder gh;
        int error;

        unlock_page(page);
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (unlikely(error))
                goto out;
        error = AOP_TRUNCATED_PAGE;
        lock_page(page);
        if (page->mapping == mapping && !PageUptodate(page))
                error = __gfs2_readpage(file, page);
        else
                unlock_page(page);
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (error && error != AOP_TRUNCATED_PAGE)
                lock_page(page);
        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_SIZE;
        unsigned offset = *pos & (PAGE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

        do {
                amt = size - copied;
                if (offset + size > PAGE_SIZE)
                        amt = PAGE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p);
                put_page(page);
                copied += amt;
                index++;
                offset = 0;
        } while(copied < size);
        (*pos) += size;
        return size;
}

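/*
 * Note that gfs2_internal_read() above passes __gfs2_readpage() straight
 * to read_cache_page(), so no glock is taken here; as noted in the comment
 * above __gfs2_readpage(), internal callers are expected to hold the inode
 * glock already.
 */
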
/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        unsigned requested = 0;
        int alloc_required;
        int error = 0;
        pgoff_t index = pos >> PAGE_SHIFT;
        unsigned from = pos & (PAGE_SIZE - 1);
        struct page *page;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
        error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;
        if (&ip->i_inode == sdp->sd_rindex) {
                error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &m_ip->i_gh);
                if (unlikely(error)) {
                        gfs2_glock_dq(&ip->i_gh);
                        goto out_uninit;
                }
        }

        alloc_required = gfs2_write_alloc_required(ip, pos, len);

        if (alloc_required || gfs2_is_jdata(ip))
                gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

        if (alloc_required) {
                struct gfs2_alloc_parms ap = { .aflags = 0, };
                requested = data_blocks + ind_blocks;
                ap.target = requested;
                error = gfs2_quota_lock_check(ip, &ap);
                if (error)
                        goto out_unlock;

                error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_qunlock;
        }

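        /*
         * Reserve journal space for the worst case: the dinode itself, any
         * new indirect blocks, the data blocks too when journalling data,
         * statfs and quota changes when blocks may be added, and the
         * resource group bitmaps covering an allocation. A write to the
         * rindex inode reserves statfs space twice over, since
         * adjust_fs_space() updates both the master and local statfs files.
         */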
        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;
        if (&ip->i_inode == sdp->sd_rindex)
                rblocks += 2 * RES_STATFS;
        if (alloc_required)
                rblocks += gfs2_rg_blocks(ip, requested);

        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;

        error = -ENOMEM;
        flags |= AOP_FLAG_NOFS;
        page = grab_cache_page_write_begin(mapping, index, flags);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;

        if (gfs2_is_stuffed(ip)) {
                error = 0;
                if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page)) {
                        error = stuffed_readpage(ip, page);
                }
                goto out;
        }

prepare_write:
        error = __block_write_begin(page, from, len, gfs2_block_map);
out:
        if (error == 0)
                return 0;

        unlock_page(page);
        put_page(page);

        gfs2_trans_end(sdp);
        if (pos + len > ip->i_inode.i_size)
                gfs2_trim_blocks(&ip->i_inode);
        goto out_trans_fail;

out_endtrans:
        gfs2_trans_end(sdp);
out_trans_fail:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_qunlock:
                gfs2_quota_unlock(ip);
        }
out_unlock:
        if (&ip->i_inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                return;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
                goto out;
        update_statfs(sdp, m_bh, l_bh);
        brelse(l_bh);
out:
        brelse(m_bh);
}

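/*
 * adjust_fs_space() is called from the write_end paths below when the
 * write is to the rindex inode itself, i.e. after gfs2_grow has added
 * new resource groups, so that the extra space is folded into the
 * statfs figures.
 */
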
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        u64 to = pos + copied;
        void *kaddr;
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

        BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
        kaddr = kmap_atomic(page);
        memcpy(buf + pos, kaddr + pos, copied);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        WARN_ON(!PageUptodate(page));
        unlock_page(page);
        put_page(page);

        if (copied) {
                if (inode->i_size < to)
                        i_size_write(inode, to);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct buffer_head *dibh;
        unsigned int from = pos & (PAGE_SIZE - 1);
        unsigned int to = from + len;
        int ret;
        struct gfs2_trans *tr = current->journal_info;
        BUG_ON(!tr);

        BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
                put_page(page);
                goto failed;
        }

        if (gfs2_is_stuffed(ip))
                return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

        if (!gfs2_is_writeback(ip))
                gfs2_page_add_databufs(ip, page, from, to);

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (tr->tr_num_buf_new)
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        else
                gfs2_trans_add_meta(ip->i_gl, dibh);


        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
failed:
        gfs2_trans_end(sdp);
        gfs2_inplace_release(ip);
        if (ip->i_qadata && ip->i_qadata->qa_qd_num)
                gfs2_quota_unlock(ip);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
        SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

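/*
 * PageChecked, set above whenever a page is dirtied through this
 * address_space operation, tells __gfs2_jdata_writepage() that it may
 * still need to create buffers and add them to a transaction before the
 * page can be written; gfs2_invalidatepage() clears the flag again when
 * the whole page is thrown away.
 */
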
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

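/*
 * gfs2_discard - forget about a buffer that is being invalidated
 *
 * Called from gfs2_invalidatepage() for each buffer that lies entirely
 * within the range being invalidated: the buffer is cleaned, detached
 * from the journal lists, and has its buffer-head state cleared.
 */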
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else
                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int stop = offset + length;
        int partial_page = (offset || length < PAGE_SIZE);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (!partial_page)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a stuffed file makes any sense. For now we'll silently fall
         * back to buffered I/O
         */
        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset >= i_size_read(&ip->i_inode))
                return 0;
        return 1;
}



static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct address_space *mapping = inode->i_mapping;
        struct gfs2_inode *ip = GFS2_I(inode);
        loff_t offset = iocb->ki_pos;
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need change is atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
        rv = gfs2_glock_nq(&gh);
        if (rv)
                goto out_uninit;
        rv = gfs2_ok_for_dio(ip, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        /*
         * Now since we are holding a deferred (CW) lock at this point, you
         * might be wondering why this is ever needed. There is a case however
         * where we've granted a deferred local lock against a cached exclusive
         * glock. That is ok provided all granted local locks are deferred, but
         * it also means that it is possible to encounter pages which are
         * cached and possibly also mapped. So here we check for that and sort
         * them out ahead of the dio. The glock state machine will take care of
         * everything else.
         *
         * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
         * the first place, mapping->nrpages will always be zero.
         */
        if (mapping->nrpages) {
                loff_t lstart = offset & ~(PAGE_SIZE - 1);
                loff_t len = iov_iter_count(iter);
                loff_t end = PAGE_ALIGN(offset + len) - 1;

                rv = 0;
                if (len == 0)
                        goto out;
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
                rv = filemap_write_and_wait_range(mapping, lstart, end);
                if (rv)
                        goto out;
                if (iov_iter_rw(iter) == WRITE)
                        truncate_inode_pages_range(mapping, lstart, end);
        }

        rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
                                  gfs2_get_block_direct, NULL, NULL, 0);
out:
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the page's buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        /*
         * From xfs_vm_releasepage: mm accommodates an old ext3 case where
         * clean pages might not have had the dirty bit cleared. Thus, it can
         * send actual dirty pages to ->releasepage() via shrink_active_list().
         *
         * As a workaround, we skip pages that contain dirty buffers below.
         * Once ->releasepage isn't called on dirty pages anymore, we can warn
         * on dirty buffers like we used to here again.
         */

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
                        goto cannot_release;
                bh = bh->b_this_page;
        } while(bh != head);
        spin_unlock(&sdp->sd_ail_lock);

        head = bh = page_buffers(page);
        do {
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        if (!list_empty(&bd->bd_list))
                                list_del_init(&bd->bd_list);
                        bd->bd_bh = NULL;
                        bh->b_private = NULL;
                        kmem_cache_free(gfs2_bufdata_cachep, bd);
                }

                bh = bh->b_this_page;
        } while (bh != head);
        gfs2_log_unlock(sdp);

        return try_to_free_buffers(page);

cannot_release:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_writeback(ip))
                inode->i_mapping->a_ops = &gfs2_writeback_aops;
        else if (gfs2_is_ordered(ip))
                inode->i_mapping->a_ops = &gfs2_ordered_aops;
        else if (gfs2_is_jdata(ip))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                BUG();
}