/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

/**
 * gfs2_page_add_databufs - Add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page, which already has buffers
 * @from: First byte offset of the write within the page
 * @to: Byte offset of the end of the write within the page
 *
 * Walks the page's circular buffer list and adds every buffer that
 * overlaps the byte range [@from, @to) to the running transaction as
 * (journaled) data.
 */
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}
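
/*
 * Both helpers above implement the VFS get_block_t contract: translate a
 * logical block in the file into a mapping in @bh_result. Passing 0 as
 * the create argument to gfs2_block_map() means nothing is ever allocated
 * here, which is what the writeback and direct I/O paths require. A
 * minimal sketch of the calling convention (illustrative only, not part
 * of this file):
 *
 *	struct buffer_head bh = { .b_size = i_blocksize(inode) };
 *
 *	if (!gfs2_get_block_noalloc(inode, lblock, &bh, 0) &&
 *	    buffer_mapped(&bh))
 *		dblock = bh.b_blocknr;
 *
 * after which bh.b_blocknr holds the physical block backing lblock.
 */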

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page should go ahead and be written, an error code
 *          on failure, or 0 if the page was dealt with here (redirtied
 *          or invalidated) without error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}
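
/*
 * Worked example for the zeroing above, with hypothetical numbers: given
 * PAGE_SIZE == 4096 and i_size == 10000, end_index is 2 and offset is
 * 1808, so pages 0 and 1 are written in full while bytes 1808..4095 of
 * page 2 are zeroed before the write, keeping stale mmap'd data beyond
 * EOF from reaching the disk.
 */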

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}
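
/*
 * The jdata writepages machinery below batches dirty pages into a single
 * transaction per pagevec. PageChecked is the handshake with
 * gfs2_set_page_dirty(): it marks pages whose buffers have not yet been
 * added to any transaction, which is why gfs2_jdata_writepage() above
 * refuses to write such a page unless a transaction is already running.
 */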

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated with the page index at which to resume writeback
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}
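
/*
 * Worked example for the reservation above, with hypothetical numbers:
 * given PAGE_SIZE == 4096, a 4096-byte filesystem block and a pagevec of
 * 15 pages, nrblocks is 15 * (4096/4096) == 15, so 15 journal blocks and
 * 15 revokes are reserved up front, before any page lock is taken. That
 * preserves the transaction-before-page-lock ordering described below.
 */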

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
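
/*
 * A "stuffed" file is one whose data lives entirely in the inode's own
 * disk block, immediately following the struct gfs2_dinode header, so it
 * has no data blocks of its own. The read paths below must cope with
 * both stuffed and unstuffed inodes.
 */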

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or errno on failure
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		/* Clamp the copy to the end of the current page. */
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}
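
/*
 * A minimal usage sketch for gfs2_internal_read(), with a hypothetical
 * caller (as for all internal inodes, the glock is assumed to be held
 * already):
 *
 *	char buf[96];
 *	loff_t pos = 0;
 *	int n = gfs2_internal_read(ip, buf, &pos, sizeof(buf));
 *
 * On success n == sizeof(buf) and pos has advanced by the same amount,
 * ready for the next sequential read.
 */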

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}
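
/*
 * The buffered write path below follows a strict ordering: glock first,
 * then quota and resource group reservations, then the transaction, and
 * only then the page lock. In outline (descriptive only):
 *
 *	gfs2_glock_nq()			exclusive inode glock
 *	gfs2_quota_lock_check()		if an allocation is required
 *	gfs2_inplace_reserve()		reserve resource group blocks
 *	gfs2_trans_begin()		reserve journal space
 *	grab_cache_page_write_begin()	finally, the page lock
 *
 * The error labels in gfs2_write_begin() unwind in exactly the reverse
 * order.
 */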

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	put_page(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}
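
/*
 * Stuffed files keep their data in the inode block itself, so the
 * largest stuffed file is sb_bsize - sizeof(struct gfs2_dinode) bytes;
 * gfs2_write_begin() above unstuffs anything that would grow past that
 * limit. The write_end side below mirrors the same split.
 */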

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: The number of bytes copied
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: The number of bytes copied, or errno on failure
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;
	BUG_ON(!tr);

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		put_page(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O.
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}
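
/*
 * A note on the lock mode used below: LM_ST_DEFERRED corresponds to the
 * DLM's CW (concurrent write) mode, which is compatible with itself on
 * other nodes but not with the PR/EX modes used by buffered I/O.
 * Holding it therefore forces remote nodes to flush and drop their
 * cached pages without serialising direct I/O nodes against each other.
 */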

static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		goto out_uninit;
	rv = gfs2_ok_for_dio(ip, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nrpages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_SIZE - 1);
		loff_t len = iov_iter_count(iter);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (iov_iter_rw(iter) == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				  gfs2_get_block_direct, NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return rv;
}
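
/*
 * Alignment example for the flush range above, with hypothetical
 * numbers: given PAGE_SIZE == 4096, offset == 5000 and len == 5000, the
 * request touches bytes 5000..9999, lstart is 4096 and end is
 * PAGE_ALIGN(10000) - 1 == 12287, so the flush and truncate cover the
 * two whole pages that the request overlaps.
 */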

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}