/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"


static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}
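/*
 * Illustrative sketch (editor's addition, not compiled): how a
 * get_block_t callback such as gfs2_get_block_noalloc is typically
 * driven.  The caller supplies a buffer_head sized to one filesystem
 * block; on success the callback maps it to a physical block.  Note
 * that both helpers above pass create == 0 to gfs2_block_map(), so no
 * allocation can ever happen on these paths.
 */
#if 0
static int example_map_one_block(struct inode *inode, sector_t lblock,
				 sector_t *dblock)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int error;

	bh.b_size = inode->i_sb->s_blocksize;
	error = gfs2_get_block_noalloc(inode, lblock, &bh, 0);
	if (error)
		return error;	/* -EIO here means "hole during writeback" */
	*dblock = bh.b_blocknr;	/* physical (disk) block number */
	return 0;
}
#endif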
/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage should continue with the write, an error code
 *          on failure, or 0 if the page has been dealt with (no error).
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}
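/*
 * Worked example for the zeroing above (editor's note, assuming
 * PAGE_SIZE == 4096): for i_size == 10000, end_index == 10000 >> 12 == 2
 * and offset == 10000 & 4095 == 1808.  Only the page at index 2
 * straddles i_size, and bytes 1808..4095 of it are zeroed before the
 * write so that data scribbled beyond EOF through an mmap never
 * reaches disk.
 */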
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}
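/*
 * Editor's note: SDF_FORCE_AIL_FLUSH is a request flag rather than an
 * immediate action; it is picked up asynchronously by the gfs2 log
 * daemon (see log.c), which then writes back the AIL so that the dirty
 * page counts balance_dirty_pages() is watching can actually drop.
 */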
/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}
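/*
 * Worked example for the reservation above (editor's note): with
 * PAGE_SIZE == 4096 and a 1KiB filesystem block size, each page spans
 * four blocks, so writing back nr_pages == 8 pages reserves
 * 8 * 4 == 32 blocks of journal space.  The transaction is opened
 * before any page lock is taken, which is the whole point of this
 * custom writeback loop (see gfs2_write_cache_jdata() below).
 */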
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
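/*
 * Editor's note on the range_cyclic handling above: if the previous
 * sweep stopped at writeback_index == 100, the first pass covers pages
 * [100, EOF].  If that pass completes without being told to stop, a
 * second pass covers [0, 99] so the whole file is eventually visited,
 * and mapping->writeback_index records where the next sweep resumes.
 */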

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, since in that case we already hold the glock. It is
 * also called by gfs2_readpage() once the required lock has been
 * granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}
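/*
 * Usage sketch for gfs2_internal_read() (editor's addition, not
 * compiled; error handling elided).  This is the pattern used by
 * internal callers such as the rindex reading code: the position is
 * passed by reference and advanced by the read.
 */
#if 0
static int example_internal_read(struct gfs2_inode *ip)
{
	char buf[sizeof(struct gfs2_rindex)];
	loff_t pos = 0;
	int copied;

	copied = gfs2_internal_read(ip, buf, &pos, sizeof(buf));
	if (copied != sizeof(buf))
		return (copied < 0) ? copied : -EIO;
	/* pos has now been advanced to sizeof(buf) */
	return 0;
}
#endif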
/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}
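/*
 * Editor's note on point 4 above: BH_Boundary tells the mpage code
 * that the block after this one cannot be read without first fetching
 * a metadata block from elsewhere on disk, so the bio built so far
 * should be submitted immediately rather than extended.  gfs2 sets it
 * at the edges of its indirect-pointer extents.
 */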
/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	put_page(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
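/*
 * Worked example for the rblocks calculation above (editor's note,
 * illustrative figures): a 4KiB write into a jdata file that requires
 * allocation, with gfs2_write_calc_reserv() yielding data_blocks == 1
 * and ind_blocks == 1, reserves RES_DINODE + 1 (indirect) + 1 (data) +
 * RES_STATFS + RES_QUOTA blocks, plus the resource-group blocks from
 * gfs2_rg_blocks().  For an ordered-mode file the data block drops
 * out, since only metadata goes through the journal in that mode.
 */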
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: The number of bytes copied
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}
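/*
 * Editor's note: a "stuffed" inode keeps its file data inline in the
 * inode block, immediately after the struct gfs2_dinode header, which
 * is why pos/copied index straight into dibh->b_data above.  A file
 * can only stay stuffed while it fits in
 * sb_bsize - sizeof(struct gfs2_dinode) bytes; gfs2_write_begin()
 * unstuffs it before any write that would cross that limit.
 */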
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;
	BUG_ON(!tr);

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		put_page(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}
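/*
 * Editor's note: returning 0 from gfs2_ok_for_dio() makes
 * gfs2_direct_IO() below return 0 without doing any I/O, which the
 * generic read/write paths interpret as "nothing transferred directly"
 * and complete the request through the page cache instead.
 */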
static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		goto out_uninit;
	rv = gfs2_ok_for_dio(ip, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nrpages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_SIZE - 1);
		loff_t len = iov_iter_count(iter);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (iov_iter_rw(iter) == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				  gfs2_get_block_direct, NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return rv;
}
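/*
 * Editor's note on the write-side truncation above: once data has gone
 * to disk directly, any page-cache copies of the affected range are
 * stale, so they are dropped for writes.  For reads the preceding
 * flush alone is enough, since the cached pages still match what is
 * on disk.
 */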
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}
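/*
 * Editor's note: the three operation tables above differ only in how
 * dirtying and writeback are journaled.  The ordered and jdata tables
 * add ->set_page_dirty so pages are marked PageChecked when dirtied,
 * and the jdata table drops ->direct_IO entirely, since journaled data
 * cannot bypass the log.  gfs2_set_aops() is presumably re-run
 * whenever an inode's data-journaling mode is established or changed,
 * so a_ops always matches the current mode.
 */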