/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

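/**
 * gfs2_page_add_databufs - Add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page to look at
 * @from: First byte offset within the page
 * @to: Last byte offset within the page
 *
 * Walks the buffer_heads attached to @page and adds each one that
 * overlaps the byte range @from..@to to the current transaction as a
 * data buffer; buffers belonging to jdata inodes are also marked
 * uptodate.
 */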
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

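/*
 * gfs2_get_block_direct - Block lookup for direct I/O
 *
 * Like gfs2_get_block_noalloc() above, this maps existing blocks only:
 * gfs2_block_map() is called with create == 0 regardless of @create, so
 * no allocation ever happens on this path. Unlike the noalloc variant,
 * unmapped (hole) results are passed straight back to the dio code.
 */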
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page can be written, an error code on failure, or
 *          0 if the page has been dealt with here (redirtied or
 *          invalidated).
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: End position
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		/*
		 * At this point, the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or
		 * even swizzled back from swapper_space to tmpfs file
		 * mapping. However, page->index will not change
		 * because we have a reference on the page.
		 */
		if (page->index > end) {
			/*
			 * can't be range_cyclic (1st pass) because
			 * end == -1 in that case.
			 */
			ret = 1;
			break;
		}

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or errno
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}

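	/*
	 * Work out the journal reservation: the dinode, any indirect
	 * blocks, the data blocks themselves when journaling data, the
	 * statfs and quota changes, and the rgrp bitmaps touched by an
	 * allocation.
	 */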
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	put_page(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: The number of bytes copied
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: The number of bytes copied, or errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;
	BUG_ON(!tr);

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		put_page(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

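/**
 * gfs2_discard - Drop a buffer from the journal and forget about it
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Removes the buffer from its journal list (or from the journal
 * entirely when it is not simply queued) and clears its dirty, mapped,
 * req and new state, so the block is forgotten rather than written
 * back.
 */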
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

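/**
 * gfs2_invalidatepage - Invalidate part or all of a page
 * @page: The (locked) page
 * @offset: Start of the range to invalidate
 * @length: Length of the range
 *
 * Any buffer that lies entirely inside the invalidated range is
 * discarded from the journal. For a full-page invalidation we also
 * clear PageChecked and try to release the page's buffers.
 */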
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}

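/**
 * gfs2_direct_IO - Direct I/O for both reads and writes
 * @iocb: The I/O control block
 * @iter: The data to read or write
 *
 * Takes the glock in deferred mode, falls back to buffered I/O for
 * stuffed files or I/O beyond EOF, and flushes (and, for writes,
 * truncates) any cached pages before handing the request on to
 * __blockdev_direct_IO().
 *
 * Returns: The number of bytes transferred, 0 to fall back to buffered
 *          I/O, or errno
 */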
static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		goto out_uninit;
	rv = gfs2_ok_for_dio(ip, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nrpages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_SIZE - 1);
		loff_t len = iov_iter_count(iter);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (iov_iter_rw(iter) == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				  gfs2_get_block_direct, NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, 0 otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_pinned(bh) || buffer_dirty(bh))
			goto not_possible;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

not_possible: /* Should never happen */
	WARN_ON(buffer_dirty(bh));
	WARN_ON(buffer_pinned(bh));
cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}