1 /* 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This copyrighted material is made available to anyone wishing to use, 6 * modify, copy, or redistribute it subject to the terms and conditions 7 * of the GNU General Public License version 2. 8 */ 9 10 #include <linux/sched.h> 11 #include <linux/slab.h> 12 #include <linux/spinlock.h> 13 #include <linux/completion.h> 14 #include <linux/buffer_head.h> 15 #include <linux/pagemap.h> 16 #include <linux/pagevec.h> 17 #include <linux/mpage.h> 18 #include <linux/fs.h> 19 #include <linux/writeback.h> 20 #include <linux/swap.h> 21 #include <linux/gfs2_ondisk.h> 22 #include <linux/backing-dev.h> 23 24 #include "gfs2.h" 25 #include "incore.h" 26 #include "bmap.h" 27 #include "glock.h" 28 #include "inode.h" 29 #include "log.h" 30 #include "meta_io.h" 31 #include "quota.h" 32 #include "trans.h" 33 #include "rgrp.h" 34 #include "super.h" 35 #include "util.h" 36 #include "glops.h" 37 38 39 static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, 40 unsigned int from, unsigned int to) 41 { 42 struct buffer_head *head = page_buffers(page); 43 unsigned int bsize = head->b_size; 44 struct buffer_head *bh; 45 unsigned int start, end; 46 47 for (bh = head, start = 0; bh != head || !start; 48 bh = bh->b_this_page, start = end) { 49 end = start + bsize; 50 if (end <= from || start >= to) 51 continue; 52 if (gfs2_is_jdata(ip)) 53 set_buffer_uptodate(bh); 54 gfs2_trans_add_bh(ip->i_gl, bh, 0); 55 } 56 } 57 58 /** 59 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block 60 * @inode: The inode 61 * @lblock: The block number to look up 62 * @bh_result: The buffer head to return the result in 63 * @create: Non-zero if we may add block to the file 64 * 65 * Returns: errno 66 */ 67 68 static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock, 69 struct buffer_head *bh_result, int create) 70 { 71 int error; 72 73 error = gfs2_block_map(inode, lblock, bh_result, 0); 74 if (error) 75 return error; 76 if (!buffer_mapped(bh_result)) 77 return -EIO; 78 return 0; 79 } 80 81 static int gfs2_get_block_direct(struct inode *inode, sector_t lblock, 82 struct buffer_head *bh_result, int create) 83 { 84 return gfs2_block_map(inode, lblock, bh_result, 0); 85 } 86 87 /** 88 * gfs2_writepage_common - Common bits of writepage 89 * @page: The page to be written 90 * @wbc: The writeback control 91 * 92 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error. 93 */ 94 95 static int gfs2_writepage_common(struct page *page, 96 struct writeback_control *wbc) 97 { 98 struct inode *inode = page->mapping->host; 99 struct gfs2_inode *ip = GFS2_I(inode); 100 struct gfs2_sbd *sdp = GFS2_SB(inode); 101 loff_t i_size = i_size_read(inode); 102 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 103 unsigned offset; 104 105 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) 106 goto out; 107 if (current->journal_info) 108 goto redirty; 109 /* Is the page fully outside i_size? (truncate in progress) */ 110 offset = i_size & (PAGE_CACHE_SIZE-1); 111 if (page->index > end_index || (page->index == end_index && !offset)) { 112 page->mapping->a_ops->invalidatepage(page, 0); 113 goto out; 114 } 115 return 1; 116 redirty: 117 redirty_page_for_writepage(wbc, page); 118 out: 119 unlock_page(page); 120 return 0; 121 } 122 123 /** 124 * gfs2_writeback_writepage - Write page for writeback mappings 125 * @page: The page 126 * @wbc: The writeback control 127 * 128 */ 129 130 static int gfs2_writeback_writepage(struct page *page, 131 struct writeback_control *wbc) 132 { 133 int ret; 134 135 ret = gfs2_writepage_common(page, wbc); 136 if (ret <= 0) 137 return ret; 138 139 ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc); 140 if (ret == -EAGAIN) 141 ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc); 142 return ret; 143 } 144 145 /** 146 * gfs2_ordered_writepage - Write page for ordered data files 147 * @page: The page to write 148 * @wbc: The writeback control 149 * 150 */ 151 152 static int gfs2_ordered_writepage(struct page *page, 153 struct writeback_control *wbc) 154 { 155 struct inode *inode = page->mapping->host; 156 struct gfs2_inode *ip = GFS2_I(inode); 157 int ret; 158 159 ret = gfs2_writepage_common(page, wbc); 160 if (ret <= 0) 161 return ret; 162 163 if (!page_has_buffers(page)) { 164 create_empty_buffers(page, inode->i_sb->s_blocksize, 165 (1 << BH_Dirty)|(1 << BH_Uptodate)); 166 } 167 gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1); 168 return block_write_full_page(page, gfs2_get_block_noalloc, wbc); 169 } 170 171 /** 172 * __gfs2_jdata_writepage - The core of jdata writepage 173 * @page: The page to write 174 * @wbc: The writeback control 175 * 176 * This is shared between writepage and writepages and implements the 177 * core of the writepage operation. If a transaction is required then 178 * PageChecked will have been set and the transaction will have 179 * already been started before this is called. 180 */ 181 182 static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) 183 { 184 struct inode *inode = page->mapping->host; 185 struct gfs2_inode *ip = GFS2_I(inode); 186 struct gfs2_sbd *sdp = GFS2_SB(inode); 187 188 if (PageChecked(page)) { 189 ClearPageChecked(page); 190 if (!page_has_buffers(page)) { 191 create_empty_buffers(page, inode->i_sb->s_blocksize, 192 (1 << BH_Dirty)|(1 << BH_Uptodate)); 193 } 194 gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1); 195 } 196 return block_write_full_page(page, gfs2_get_block_noalloc, wbc); 197 } 198 199 /** 200 * gfs2_jdata_writepage - Write complete page 201 * @page: Page to write 202 * 203 * Returns: errno 204 * 205 */ 206 207 static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) 208 { 209 struct inode *inode = page->mapping->host; 210 struct gfs2_sbd *sdp = GFS2_SB(inode); 211 int ret; 212 int done_trans = 0; 213 214 if (PageChecked(page)) { 215 if (wbc->sync_mode != WB_SYNC_ALL) 216 goto out_ignore; 217 ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0); 218 if (ret) 219 goto out_ignore; 220 done_trans = 1; 221 } 222 ret = gfs2_writepage_common(page, wbc); 223 if (ret > 0) 224 ret = __gfs2_jdata_writepage(page, wbc); 225 if (done_trans) 226 gfs2_trans_end(sdp); 227 return ret; 228 229 out_ignore: 230 redirty_page_for_writepage(wbc, page); 231 unlock_page(page); 232 return 0; 233 } 234 235 /** 236 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk 237 * @mapping: The mapping to write 238 * @wbc: Write-back control 239 * 240 * For the data=writeback case we can already ignore buffer heads 241 * and write whole extents at once. This is a big reduction in the 242 * number of I/O requests we send and the bmap calls we make in this case. 243 */ 244 static int gfs2_writeback_writepages(struct address_space *mapping, 245 struct writeback_control *wbc) 246 { 247 return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); 248 } 249 250 /** 251 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages 252 * @mapping: The mapping 253 * @wbc: The writeback control 254 * @writepage: The writepage function to call for each page 255 * @pvec: The vector of pages 256 * @nr_pages: The number of pages to write 257 * 258 * Returns: non-zero if loop should terminate, zero otherwise 259 */ 260 261 static int gfs2_write_jdata_pagevec(struct address_space *mapping, 262 struct writeback_control *wbc, 263 struct pagevec *pvec, 264 int nr_pages, pgoff_t end) 265 { 266 struct inode *inode = mapping->host; 267 struct gfs2_sbd *sdp = GFS2_SB(inode); 268 loff_t i_size = i_size_read(inode); 269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1); 271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); 272 int i; 273 int ret; 274 275 ret = gfs2_trans_begin(sdp, nrblocks, nrblocks); 276 if (ret < 0) 277 return ret; 278 279 for(i = 0; i < nr_pages; i++) { 280 struct page *page = pvec->pages[i]; 281 282 lock_page(page); 283 284 if (unlikely(page->mapping != mapping)) { 285 unlock_page(page); 286 continue; 287 } 288 289 if (!wbc->range_cyclic && page->index > end) { 290 ret = 1; 291 unlock_page(page); 292 continue; 293 } 294 295 if (wbc->sync_mode != WB_SYNC_NONE) 296 wait_on_page_writeback(page); 297 298 if (PageWriteback(page) || 299 !clear_page_dirty_for_io(page)) { 300 unlock_page(page); 301 continue; 302 } 303 304 /* Is the page fully outside i_size? (truncate in progress) */ 305 if (page->index > end_index || (page->index == end_index && !offset)) { 306 page->mapping->a_ops->invalidatepage(page, 0); 307 unlock_page(page); 308 continue; 309 } 310 311 ret = __gfs2_jdata_writepage(page, wbc); 312 313 if (ret || (--(wbc->nr_to_write) <= 0)) 314 ret = 1; 315 } 316 gfs2_trans_end(sdp); 317 return ret; 318 } 319 320 /** 321 * gfs2_write_cache_jdata - Like write_cache_pages but different 322 * @mapping: The mapping to write 323 * @wbc: The writeback control 324 * @writepage: The writepage function to call 325 * @data: The data to pass to writepage 326 * 327 * The reason that we use our own function here is that we need to 328 * start transactions before we grab page locks. This allows us 329 * to get the ordering right. 330 */ 331 332 static int gfs2_write_cache_jdata(struct address_space *mapping, 333 struct writeback_control *wbc) 334 { 335 int ret = 0; 336 int done = 0; 337 struct pagevec pvec; 338 int nr_pages; 339 pgoff_t index; 340 pgoff_t end; 341 int scanned = 0; 342 int range_whole = 0; 343 344 pagevec_init(&pvec, 0); 345 if (wbc->range_cyclic) { 346 index = mapping->writeback_index; /* Start from prev offset */ 347 end = -1; 348 } else { 349 index = wbc->range_start >> PAGE_CACHE_SHIFT; 350 end = wbc->range_end >> PAGE_CACHE_SHIFT; 351 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 352 range_whole = 1; 353 scanned = 1; 354 } 355 356 retry: 357 while (!done && (index <= end) && 358 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, 359 PAGECACHE_TAG_DIRTY, 360 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { 361 scanned = 1; 362 ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end); 363 if (ret) 364 done = 1; 365 if (ret > 0) 366 ret = 0; 367 368 pagevec_release(&pvec); 369 cond_resched(); 370 } 371 372 if (!scanned && !done) { 373 /* 374 * We hit the last page and there is more work to be done: wrap 375 * back to the start of the file 376 */ 377 scanned = 1; 378 index = 0; 379 goto retry; 380 } 381 382 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 383 mapping->writeback_index = index; 384 return ret; 385 } 386 387 388 /** 389 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk 390 * @mapping: The mapping to write 391 * @wbc: The writeback control 392 * 393 */ 394 395 static int gfs2_jdata_writepages(struct address_space *mapping, 396 struct writeback_control *wbc) 397 { 398 struct gfs2_inode *ip = GFS2_I(mapping->host); 399 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); 400 int ret; 401 402 ret = gfs2_write_cache_jdata(mapping, wbc); 403 if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) { 404 gfs2_log_flush(sdp, ip->i_gl); 405 ret = gfs2_write_cache_jdata(mapping, wbc); 406 } 407 return ret; 408 } 409 410 /** 411 * stuffed_readpage - Fill in a Linux page with stuffed file data 412 * @ip: the inode 413 * @page: the page 414 * 415 * Returns: errno 416 */ 417 418 static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) 419 { 420 struct buffer_head *dibh; 421 void *kaddr; 422 int error; 423 424 /* 425 * Due to the order of unstuffing files and ->fault(), we can be 426 * asked for a zero page in the case of a stuffed file being extended, 427 * so we need to supply one here. It doesn't happen often. 428 */ 429 if (unlikely(page->index)) { 430 zero_user(page, 0, PAGE_CACHE_SIZE); 431 SetPageUptodate(page); 432 return 0; 433 } 434 435 error = gfs2_meta_inode_buffer(ip, &dibh); 436 if (error) 437 return error; 438 439 kaddr = kmap_atomic(page, KM_USER0); 440 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), 441 ip->i_disksize); 442 memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize); 443 kunmap_atomic(kaddr, KM_USER0); 444 flush_dcache_page(page); 445 brelse(dibh); 446 SetPageUptodate(page); 447 448 return 0; 449 } 450 451 452 /** 453 * __gfs2_readpage - readpage 454 * @file: The file to read a page for 455 * @page: The page to read 456 * 457 * This is the core of gfs2's readpage. Its used by the internal file 458 * reading code as in that case we already hold the glock. Also its 459 * called by gfs2_readpage() once the required lock has been granted. 460 * 461 */ 462 463 static int __gfs2_readpage(void *file, struct page *page) 464 { 465 struct gfs2_inode *ip = GFS2_I(page->mapping->host); 466 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); 467 int error; 468 469 if (gfs2_is_stuffed(ip)) { 470 error = stuffed_readpage(ip, page); 471 unlock_page(page); 472 } else { 473 error = mpage_readpage(page, gfs2_block_map); 474 } 475 476 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 477 return -EIO; 478 479 return error; 480 } 481 482 /** 483 * gfs2_readpage - read a page of a file 484 * @file: The file to read 485 * @page: The page of the file 486 * 487 * This deals with the locking required. We have to unlock and 488 * relock the page in order to get the locking in the right 489 * order. 490 */ 491 492 static int gfs2_readpage(struct file *file, struct page *page) 493 { 494 struct address_space *mapping = page->mapping; 495 struct gfs2_inode *ip = GFS2_I(mapping->host); 496 struct gfs2_holder gh; 497 int error; 498 499 unlock_page(page); 500 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 501 error = gfs2_glock_nq(&gh); 502 if (unlikely(error)) 503 goto out; 504 error = AOP_TRUNCATED_PAGE; 505 lock_page(page); 506 if (page->mapping == mapping && !PageUptodate(page)) 507 error = __gfs2_readpage(file, page); 508 else 509 unlock_page(page); 510 gfs2_glock_dq(&gh); 511 out: 512 gfs2_holder_uninit(&gh); 513 if (error && error != AOP_TRUNCATED_PAGE) 514 lock_page(page); 515 return error; 516 } 517 518 /** 519 * gfs2_internal_read - read an internal file 520 * @ip: The gfs2 inode 521 * @ra_state: The readahead state (or NULL for no readahead) 522 * @buf: The buffer to fill 523 * @pos: The file position 524 * @size: The amount to read 525 * 526 */ 527 528 int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state, 529 char *buf, loff_t *pos, unsigned size) 530 { 531 struct address_space *mapping = ip->i_inode.i_mapping; 532 unsigned long index = *pos / PAGE_CACHE_SIZE; 533 unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); 534 unsigned copied = 0; 535 unsigned amt; 536 struct page *page; 537 void *p; 538 539 do { 540 amt = size - copied; 541 if (offset + size > PAGE_CACHE_SIZE) 542 amt = PAGE_CACHE_SIZE - offset; 543 page = read_cache_page(mapping, index, __gfs2_readpage, NULL); 544 if (IS_ERR(page)) 545 return PTR_ERR(page); 546 p = kmap_atomic(page, KM_USER0); 547 memcpy(buf + copied, p + offset, amt); 548 kunmap_atomic(p, KM_USER0); 549 mark_page_accessed(page); 550 page_cache_release(page); 551 copied += amt; 552 index++; 553 offset = 0; 554 } while(copied < size); 555 (*pos) += size; 556 return size; 557 } 558 559 /** 560 * gfs2_readpages - Read a bunch of pages at once 561 * 562 * Some notes: 563 * 1. This is only for readahead, so we can simply ignore any things 564 * which are slightly inconvenient (such as locking conflicts between 565 * the page lock and the glock) and return having done no I/O. Its 566 * obviously not something we'd want to do on too regular a basis. 567 * Any I/O we ignore at this time will be done via readpage later. 568 * 2. We don't handle stuffed files here we let readpage do the honours. 569 * 3. mpage_readpages() does most of the heavy lifting in the common case. 570 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places. 571 */ 572 573 static int gfs2_readpages(struct file *file, struct address_space *mapping, 574 struct list_head *pages, unsigned nr_pages) 575 { 576 struct inode *inode = mapping->host; 577 struct gfs2_inode *ip = GFS2_I(inode); 578 struct gfs2_sbd *sdp = GFS2_SB(inode); 579 struct gfs2_holder gh; 580 int ret; 581 582 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 583 ret = gfs2_glock_nq(&gh); 584 if (unlikely(ret)) 585 goto out_uninit; 586 if (!gfs2_is_stuffed(ip)) 587 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); 588 gfs2_glock_dq(&gh); 589 out_uninit: 590 gfs2_holder_uninit(&gh); 591 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 592 ret = -EIO; 593 return ret; 594 } 595 596 /** 597 * gfs2_write_begin - Begin to write to a file 598 * @file: The file to write to 599 * @mapping: The mapping in which to write 600 * @pos: The file offset at which to start writing 601 * @len: Length of the write 602 * @flags: Various flags 603 * @pagep: Pointer to return the page 604 * @fsdata: Pointer to return fs data (unused by GFS2) 605 * 606 * Returns: errno 607 */ 608 609 static int gfs2_write_begin(struct file *file, struct address_space *mapping, 610 loff_t pos, unsigned len, unsigned flags, 611 struct page **pagep, void **fsdata) 612 { 613 struct gfs2_inode *ip = GFS2_I(mapping->host); 614 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); 615 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 616 unsigned int data_blocks = 0, ind_blocks = 0, rblocks; 617 int alloc_required; 618 int error = 0; 619 struct gfs2_alloc *al; 620 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 621 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 622 unsigned to = from + len; 623 struct page *page; 624 625 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); 626 error = gfs2_glock_nq(&ip->i_gh); 627 if (unlikely(error)) 628 goto out_uninit; 629 if (&ip->i_inode == sdp->sd_rindex) { 630 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 631 GL_NOCACHE, &m_ip->i_gh); 632 if (unlikely(error)) { 633 gfs2_glock_dq(&ip->i_gh); 634 goto out_uninit; 635 } 636 } 637 638 error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); 639 if (error) 640 goto out_unlock; 641 642 if (alloc_required || gfs2_is_jdata(ip)) 643 gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); 644 645 if (alloc_required) { 646 al = gfs2_alloc_get(ip); 647 if (!al) { 648 error = -ENOMEM; 649 goto out_unlock; 650 } 651 652 error = gfs2_quota_lock_check(ip); 653 if (error) 654 goto out_alloc_put; 655 656 al->al_requested = data_blocks + ind_blocks; 657 error = gfs2_inplace_reserve(ip); 658 if (error) 659 goto out_qunlock; 660 } 661 662 rblocks = RES_DINODE + ind_blocks; 663 if (gfs2_is_jdata(ip)) 664 rblocks += data_blocks ? data_blocks : 1; 665 if (ind_blocks || data_blocks) 666 rblocks += RES_STATFS + RES_QUOTA; 667 if (&ip->i_inode == sdp->sd_rindex) 668 rblocks += 2 * RES_STATFS; 669 670 error = gfs2_trans_begin(sdp, rblocks, 671 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); 672 if (error) 673 goto out_trans_fail; 674 675 error = -ENOMEM; 676 flags |= AOP_FLAG_NOFS; 677 page = grab_cache_page_write_begin(mapping, index, flags); 678 *pagep = page; 679 if (unlikely(!page)) 680 goto out_endtrans; 681 682 if (gfs2_is_stuffed(ip)) { 683 error = 0; 684 if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { 685 error = gfs2_unstuff_dinode(ip, page); 686 if (error == 0) 687 goto prepare_write; 688 } else if (!PageUptodate(page)) { 689 error = stuffed_readpage(ip, page); 690 } 691 goto out; 692 } 693 694 prepare_write: 695 error = block_prepare_write(page, from, to, gfs2_block_map); 696 out: 697 if (error == 0) 698 return 0; 699 700 page_cache_release(page); 701 if (pos + len > ip->i_inode.i_size) 702 vmtruncate(&ip->i_inode, ip->i_inode.i_size); 703 out_endtrans: 704 gfs2_trans_end(sdp); 705 out_trans_fail: 706 if (alloc_required) { 707 gfs2_inplace_release(ip); 708 out_qunlock: 709 gfs2_quota_unlock(ip); 710 out_alloc_put: 711 gfs2_alloc_put(ip); 712 } 713 out_unlock: 714 if (&ip->i_inode == sdp->sd_rindex) { 715 gfs2_glock_dq(&m_ip->i_gh); 716 gfs2_holder_uninit(&m_ip->i_gh); 717 } 718 gfs2_glock_dq(&ip->i_gh); 719 out_uninit: 720 gfs2_holder_uninit(&ip->i_gh); 721 return error; 722 } 723 724 /** 725 * adjust_fs_space - Adjusts the free space available due to gfs2_grow 726 * @inode: the rindex inode 727 */ 728 static void adjust_fs_space(struct inode *inode) 729 { 730 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; 731 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 732 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 733 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 734 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 735 struct buffer_head *m_bh, *l_bh; 736 u64 fs_total, new_free; 737 738 /* Total up the file system space, according to the latest rindex. */ 739 fs_total = gfs2_ri_total(sdp); 740 if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0) 741 return; 742 743 spin_lock(&sdp->sd_statfs_spin); 744 gfs2_statfs_change_in(m_sc, m_bh->b_data + 745 sizeof(struct gfs2_dinode)); 746 if (fs_total > (m_sc->sc_total + l_sc->sc_total)) 747 new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); 748 else 749 new_free = 0; 750 spin_unlock(&sdp->sd_statfs_spin); 751 fs_warn(sdp, "File system extended by %llu blocks.\n", 752 (unsigned long long)new_free); 753 gfs2_statfs_change(sdp, new_free, new_free, 0); 754 755 if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0) 756 goto out; 757 update_statfs(sdp, m_bh, l_bh); 758 brelse(l_bh); 759 out: 760 brelse(m_bh); 761 } 762 763 /** 764 * gfs2_stuffed_write_end - Write end for stuffed files 765 * @inode: The inode 766 * @dibh: The buffer_head containing the on-disk inode 767 * @pos: The file position 768 * @len: The length of the write 769 * @copied: How much was actually copied by the VFS 770 * @page: The page 771 * 772 * This copies the data from the page into the inode block after 773 * the inode data structure itself. 774 * 775 * Returns: errno 776 */ 777 static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, 778 loff_t pos, unsigned len, unsigned copied, 779 struct page *page) 780 { 781 struct gfs2_inode *ip = GFS2_I(inode); 782 struct gfs2_sbd *sdp = GFS2_SB(inode); 783 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 784 u64 to = pos + copied; 785 void *kaddr; 786 unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); 787 struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; 788 789 BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode))); 790 kaddr = kmap_atomic(page, KM_USER0); 791 memcpy(buf + pos, kaddr + pos, copied); 792 memset(kaddr + pos + copied, 0, len - copied); 793 flush_dcache_page(page); 794 kunmap_atomic(kaddr, KM_USER0); 795 796 if (!PageUptodate(page)) 797 SetPageUptodate(page); 798 unlock_page(page); 799 page_cache_release(page); 800 801 if (copied) { 802 if (inode->i_size < to) { 803 i_size_write(inode, to); 804 ip->i_disksize = inode->i_size; 805 } 806 gfs2_dinode_out(ip, di); 807 mark_inode_dirty(inode); 808 } 809 810 if (inode == sdp->sd_rindex) { 811 adjust_fs_space(inode); 812 ip->i_gh.gh_flags |= GL_NOCACHE; 813 } 814 815 brelse(dibh); 816 gfs2_trans_end(sdp); 817 if (inode == sdp->sd_rindex) { 818 gfs2_glock_dq(&m_ip->i_gh); 819 gfs2_holder_uninit(&m_ip->i_gh); 820 } 821 gfs2_glock_dq(&ip->i_gh); 822 gfs2_holder_uninit(&ip->i_gh); 823 return copied; 824 } 825 826 /** 827 * gfs2_write_end 828 * @file: The file to write to 829 * @mapping: The address space to write to 830 * @pos: The file position 831 * @len: The length of the data 832 * @copied: 833 * @page: The page that has been written 834 * @fsdata: The fsdata (unused in GFS2) 835 * 836 * The main write_end function for GFS2. We have a separate one for 837 * stuffed files as they are slightly different, otherwise we just 838 * put our locking around the VFS provided functions. 839 * 840 * Returns: errno 841 */ 842 843 static int gfs2_write_end(struct file *file, struct address_space *mapping, 844 loff_t pos, unsigned len, unsigned copied, 845 struct page *page, void *fsdata) 846 { 847 struct inode *inode = page->mapping->host; 848 struct gfs2_inode *ip = GFS2_I(inode); 849 struct gfs2_sbd *sdp = GFS2_SB(inode); 850 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 851 struct buffer_head *dibh; 852 struct gfs2_alloc *al = ip->i_alloc; 853 unsigned int from = pos & (PAGE_CACHE_SIZE - 1); 854 unsigned int to = from + len; 855 int ret; 856 857 BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL); 858 859 ret = gfs2_meta_inode_buffer(ip, &dibh); 860 if (unlikely(ret)) { 861 unlock_page(page); 862 page_cache_release(page); 863 goto failed; 864 } 865 866 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 867 868 if (gfs2_is_stuffed(ip)) 869 return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page); 870 871 if (!gfs2_is_writeback(ip)) 872 gfs2_page_add_databufs(ip, page, from, to); 873 874 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 875 if (ret > 0) { 876 if (inode->i_size > ip->i_disksize) 877 ip->i_disksize = inode->i_size; 878 gfs2_dinode_out(ip, dibh->b_data); 879 mark_inode_dirty(inode); 880 } 881 882 if (inode == sdp->sd_rindex) { 883 adjust_fs_space(inode); 884 ip->i_gh.gh_flags |= GL_NOCACHE; 885 } 886 887 brelse(dibh); 888 gfs2_trans_end(sdp); 889 failed: 890 if (al) { 891 gfs2_inplace_release(ip); 892 gfs2_quota_unlock(ip); 893 gfs2_alloc_put(ip); 894 } 895 if (inode == sdp->sd_rindex) { 896 gfs2_glock_dq(&m_ip->i_gh); 897 gfs2_holder_uninit(&m_ip->i_gh); 898 } 899 gfs2_glock_dq(&ip->i_gh); 900 gfs2_holder_uninit(&ip->i_gh); 901 return ret; 902 } 903 904 /** 905 * gfs2_set_page_dirty - Page dirtying function 906 * @page: The page to dirty 907 * 908 * Returns: 1 if it dirtyed the page, or 0 otherwise 909 */ 910 911 static int gfs2_set_page_dirty(struct page *page) 912 { 913 SetPageChecked(page); 914 return __set_page_dirty_buffers(page); 915 } 916 917 /** 918 * gfs2_bmap - Block map function 919 * @mapping: Address space info 920 * @lblock: The block to map 921 * 922 * Returns: The disk address for the block or 0 on hole or error 923 */ 924 925 static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) 926 { 927 struct gfs2_inode *ip = GFS2_I(mapping->host); 928 struct gfs2_holder i_gh; 929 sector_t dblock = 0; 930 int error; 931 932 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 933 if (error) 934 return 0; 935 936 if (!gfs2_is_stuffed(ip)) 937 dblock = generic_block_bmap(mapping, lblock, gfs2_block_map); 938 939 gfs2_glock_dq_uninit(&i_gh); 940 941 return dblock; 942 } 943 944 static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh) 945 { 946 struct gfs2_bufdata *bd; 947 948 lock_buffer(bh); 949 gfs2_log_lock(sdp); 950 clear_buffer_dirty(bh); 951 bd = bh->b_private; 952 if (bd) { 953 if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh)) 954 list_del_init(&bd->bd_le.le_list); 955 else 956 gfs2_remove_from_journal(bh, current->journal_info, 0); 957 } 958 bh->b_bdev = NULL; 959 clear_buffer_mapped(bh); 960 clear_buffer_req(bh); 961 clear_buffer_new(bh); 962 gfs2_log_unlock(sdp); 963 unlock_buffer(bh); 964 } 965 966 static void gfs2_invalidatepage(struct page *page, unsigned long offset) 967 { 968 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); 969 struct buffer_head *bh, *head; 970 unsigned long pos = 0; 971 972 BUG_ON(!PageLocked(page)); 973 if (offset == 0) 974 ClearPageChecked(page); 975 if (!page_has_buffers(page)) 976 goto out; 977 978 bh = head = page_buffers(page); 979 do { 980 if (offset <= pos) 981 gfs2_discard(sdp, bh); 982 pos += bh->b_size; 983 bh = bh->b_this_page; 984 } while (bh != head); 985 out: 986 if (offset == 0) 987 try_to_release_page(page, 0); 988 } 989 990 /** 991 * gfs2_ok_for_dio - check that dio is valid on this file 992 * @ip: The inode 993 * @rw: READ or WRITE 994 * @offset: The offset at which we are reading or writing 995 * 996 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o) 997 * 1 (to accept the i/o request) 998 */ 999 static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset) 1000 { 1001 /* 1002 * Should we return an error here? I can't see that O_DIRECT for 1003 * a stuffed file makes any sense. For now we'll silently fall 1004 * back to buffered I/O 1005 */ 1006 if (gfs2_is_stuffed(ip)) 1007 return 0; 1008 1009 if (offset >= i_size_read(&ip->i_inode)) 1010 return 0; 1011 return 1; 1012 } 1013 1014 1015 1016 static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, 1017 const struct iovec *iov, loff_t offset, 1018 unsigned long nr_segs) 1019 { 1020 struct file *file = iocb->ki_filp; 1021 struct inode *inode = file->f_mapping->host; 1022 struct gfs2_inode *ip = GFS2_I(inode); 1023 struct gfs2_holder gh; 1024 int rv; 1025 1026 /* 1027 * Deferred lock, even if its a write, since we do no allocation 1028 * on this path. All we need change is atime, and this lock mode 1029 * ensures that other nodes have flushed their buffered read caches 1030 * (i.e. their page cache entries for this inode). We do not, 1031 * unfortunately have the option of only flushing a range like 1032 * the VFS does. 1033 */ 1034 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh); 1035 rv = gfs2_glock_nq(&gh); 1036 if (rv) 1037 return rv; 1038 rv = gfs2_ok_for_dio(ip, rw, offset); 1039 if (rv != 1) 1040 goto out; /* dio not valid, fall back to buffered i/o */ 1041 1042 rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, 1043 iov, offset, nr_segs, 1044 gfs2_get_block_direct, NULL); 1045 out: 1046 gfs2_glock_dq_m(1, &gh); 1047 gfs2_holder_uninit(&gh); 1048 return rv; 1049 } 1050 1051 /** 1052 * gfs2_releasepage - free the metadata associated with a page 1053 * @page: the page that's being released 1054 * @gfp_mask: passed from Linux VFS, ignored by us 1055 * 1056 * Call try_to_free_buffers() if the buffers in this page can be 1057 * released. 1058 * 1059 * Returns: 0 1060 */ 1061 1062 int gfs2_releasepage(struct page *page, gfp_t gfp_mask) 1063 { 1064 struct inode *aspace = page->mapping->host; 1065 struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info; 1066 struct buffer_head *bh, *head; 1067 struct gfs2_bufdata *bd; 1068 1069 if (!page_has_buffers(page)) 1070 return 0; 1071 1072 gfs2_log_lock(sdp); 1073 head = bh = page_buffers(page); 1074 do { 1075 if (atomic_read(&bh->b_count)) 1076 goto cannot_release; 1077 bd = bh->b_private; 1078 if (bd && bd->bd_ail) 1079 goto cannot_release; 1080 gfs2_assert_warn(sdp, !buffer_pinned(bh)); 1081 gfs2_assert_warn(sdp, !buffer_dirty(bh)); 1082 bh = bh->b_this_page; 1083 } while(bh != head); 1084 gfs2_log_unlock(sdp); 1085 1086 head = bh = page_buffers(page); 1087 do { 1088 gfs2_log_lock(sdp); 1089 bd = bh->b_private; 1090 if (bd) { 1091 gfs2_assert_warn(sdp, bd->bd_bh == bh); 1092 gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr)); 1093 if (!list_empty(&bd->bd_le.le_list)) { 1094 if (!buffer_pinned(bh)) 1095 list_del_init(&bd->bd_le.le_list); 1096 else 1097 bd = NULL; 1098 } 1099 if (bd) 1100 bd->bd_bh = NULL; 1101 bh->b_private = NULL; 1102 } 1103 gfs2_log_unlock(sdp); 1104 if (bd) 1105 kmem_cache_free(gfs2_bufdata_cachep, bd); 1106 1107 bh = bh->b_this_page; 1108 } while (bh != head); 1109 1110 return try_to_free_buffers(page); 1111 cannot_release: 1112 gfs2_log_unlock(sdp); 1113 return 0; 1114 } 1115 1116 static const struct address_space_operations gfs2_writeback_aops = { 1117 .writepage = gfs2_writeback_writepage, 1118 .writepages = gfs2_writeback_writepages, 1119 .readpage = gfs2_readpage, 1120 .readpages = gfs2_readpages, 1121 .sync_page = block_sync_page, 1122 .write_begin = gfs2_write_begin, 1123 .write_end = gfs2_write_end, 1124 .bmap = gfs2_bmap, 1125 .invalidatepage = gfs2_invalidatepage, 1126 .releasepage = gfs2_releasepage, 1127 .direct_IO = gfs2_direct_IO, 1128 .migratepage = buffer_migrate_page, 1129 .is_partially_uptodate = block_is_partially_uptodate, 1130 .error_remove_page = generic_error_remove_page, 1131 }; 1132 1133 static const struct address_space_operations gfs2_ordered_aops = { 1134 .writepage = gfs2_ordered_writepage, 1135 .readpage = gfs2_readpage, 1136 .readpages = gfs2_readpages, 1137 .sync_page = block_sync_page, 1138 .write_begin = gfs2_write_begin, 1139 .write_end = gfs2_write_end, 1140 .set_page_dirty = gfs2_set_page_dirty, 1141 .bmap = gfs2_bmap, 1142 .invalidatepage = gfs2_invalidatepage, 1143 .releasepage = gfs2_releasepage, 1144 .direct_IO = gfs2_direct_IO, 1145 .migratepage = buffer_migrate_page, 1146 .is_partially_uptodate = block_is_partially_uptodate, 1147 .error_remove_page = generic_error_remove_page, 1148 }; 1149 1150 static const struct address_space_operations gfs2_jdata_aops = { 1151 .writepage = gfs2_jdata_writepage, 1152 .writepages = gfs2_jdata_writepages, 1153 .readpage = gfs2_readpage, 1154 .readpages = gfs2_readpages, 1155 .sync_page = block_sync_page, 1156 .write_begin = gfs2_write_begin, 1157 .write_end = gfs2_write_end, 1158 .set_page_dirty = gfs2_set_page_dirty, 1159 .bmap = gfs2_bmap, 1160 .invalidatepage = gfs2_invalidatepage, 1161 .releasepage = gfs2_releasepage, 1162 .is_partially_uptodate = block_is_partially_uptodate, 1163 .error_remove_page = generic_error_remove_page, 1164 }; 1165 1166 void gfs2_set_aops(struct inode *inode) 1167 { 1168 struct gfs2_inode *ip = GFS2_I(inode); 1169 1170 if (gfs2_is_writeback(ip)) 1171 inode->i_mapping->a_ops = &gfs2_writeback_aops; 1172 else if (gfs2_is_ordered(ip)) 1173 inode->i_mapping->a_ops = &gfs2_ordered_aops; 1174 else if (gfs2_is_jdata(ip)) 1175 inode->i_mapping->a_ops = &gfs2_jdata_aops; 1176 else 1177 BUG(); 1178 } 1179 1180