/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <trace/events/block.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

inline void touch_buffer(struct buffer_head *bh)
{
        trace_block_touch_buffer(bh);
        mark_page_accessed(bh->b_page);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_atomic();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Reports, via @dirty and @writeback, whether the page has dirty or
 * writeback buffers.  If all the buffers are unlocked and clean then the
 * PageDirty information is stale.  If any of the buffers are locked, it is
 * assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct page *page,
                                     bool *dirty, bool *writeback)
{
        struct buffer_head *head, *bh;
        *dirty = false;
        *writeback = false;

        BUG_ON(!PageLocked(page));

        if (!page_has_buffers(page))
                return;

        if (PageWriteback(page))
                *writeback = true;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_locked(bh))
                        *writeback = true;

                if (buffer_dirty(bh))
                        *dirty = true;

                bh = bh->b_this_page;
        } while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);

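/*
 * Illustrative sketch (not part of this file): the lock/wait discipline the
 * helpers above implement.  bh is assumed to be a valid, referenced
 * buffer_head obtained elsewhere.  Merely waiting only guarantees the buffer
 * *was* unlocked at some instant; a caller that needs a stable view must
 * take BH_Lock itself:
 *
 *      lock_buffer(bh);        - may sleep until BH_Lock is ours
 *      ... bh->b_data is stable against IO completion here ...
 *      unlock_buffer(bh);      - wakes anyone blocked in wait_on_buffer()
 */
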
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}

static int quiet_error(struct buffer_head *bh)
{
        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
                return 0;
        return 1;
}

static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                        bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

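/*
 * Illustrative sketch (not part of this file): how the synchronous read
 * end_io handler above is typically wired up to read one block by hand.
 * bh is assumed mapped (b_bdev/b_blocknr valid) and referenced:
 *
 *      lock_buffer(bh);
 *      if (!buffer_uptodate(bh)) {
 *              get_bh(bh);             - extra ref, dropped by the end_io
 *              bh->b_end_io = end_buffer_read_sync;
 *              submit_bh(READ, bh);
 *              wait_on_buffer(bh);
 *              if (!buffer_uptodate(bh))
 *                      ... the read failed, return -EIO ...
 *      } else
 *              unlock_buffer(bh);
 */
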
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                char b[BDEVNAME_SIZE];

                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device %s blocksize: %d\n", bdevname(bdev, b),
                        1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone *zone;
        int nid;

        wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
        yield();

        for_each_online_node(nid) {
                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                                gfp_zone(GFP_NOFS), NULL,
                                                &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                                GFP_NOFS, NULL);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                        bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

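/*
 * Illustrative sketch (not part of this file): the async write protocol the
 * helpers above implement, as a writepage-style path would drive it.  Every
 * dirty buffer on the (locked) page is marked async-write *before* anything
 * is submitted, so the end_io handler never sees a half-marked page:
 *
 *      bh = head = page_buffers(page);
 *      do {
 *              lock_buffer(bh);
 *              if (test_clear_buffer_dirty(bh))
 *                      mark_buffer_async_write(bh);
 *              else
 *                      unlock_buffer(bh);
 *      } while ((bh = bh->b_this_page) != head);
 *      set_page_writeback(page);
 *      ... then submit_bh(WRITE, bh) for each async-write buffer ...
 */
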
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

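/*
 * Illustrative sketch (not part of this file): how a filesystem uses the
 * association machinery described above, via mark_buffer_dirty_inode() and
 * sync_mapping_buffers() (both defined below).  The example_* names are
 * assumptions for illustration only:
 *
 *      - while updating an indirect block on behalf of an S_ISREG inode:
 *              mark_buffer_dirty_inode(bh, inode);
 *        which queues bh on inode->i_mapping->private_list;
 *
 *      - int example_fsync(struct file *file, loff_t start, loff_t end,
 *                          int datasync)
 *        {
 *              ...
 *              return sync_mapping_buffers(file->f_mapping);
 *        }
 */
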
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

static void do_thaw_one(struct super_block *sb, void *unused)
{
        char b[BDEVNAME_SIZE];
        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
                printk(KERN_WARNING "Emergency Thaw on %s\n",
                       bdevname(sb->s_bdev, b));
}

static void do_thaw_all(struct work_struct *work)
{
        iterate_supers(do_thaw_one, NULL);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->private_data;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->private_data) {
                mapping->private_data = buffer_mapping;
        } else {
                BUG_ON(mapping->private_data != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

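/*
 * Illustrative sketch (not part of this file): a block-allocating writer
 * that has just written the last data block before an indirect boundary can
 * nudge the (probably dirty) indirect block out along with it using
 * write_boundary_block() above.  bh is assumed to be the just-written data
 * buffer whose get_block() marked it with set_buffer_boundary():
 *
 *      if (buffer_boundary(bh))
 *              write_boundary_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
 */
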
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        unsigned long flags;

        spin_lock_irqsave(&mapping->tree_lock, flags);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);
        return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

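/*
 * Illustrative sketch (not part of this file): buffer-backed filesystems
 * can plug the helper above straight into their address_space operations,
 * e.g. (example_aops is an assumed name):
 *
 *      static const struct address_space_operations example_aops = {
 *              ...
 *              .set_page_dirty = __set_page_dirty_buffers,
 *              ...
 *      };
 */
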
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;

        INIT_LIST_HEAD(&tmp);
        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, WRITE_SYNC);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->private_data;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->private_data;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

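/*
 * Illustrative sketch (not part of this file): how alloc_page_buffers()
 * above is typically driven.  A caller holding the page locked, and able to
 * tolerate failure, passes retry == 0:
 *
 *      struct buffer_head *head = alloc_page_buffers(page, blocksize, 0);
 *      if (!head)
 *              return NULL;    - with retry == 0, allocation may fail
 *      ... link the ring to the page and fill in b_bdev/b_blocknr ...
 *
 * With retry != 0 the call never returns NULL: it loops in
 * free_more_memory() until buffer heads are reclaimed, which is why async
 * paths (paging, swapping) use it that way.
 */
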
/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);
        sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        if (block < end_block)
                                set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * Caller needs to validate requested block against end of device.
         */
        return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size, int sizebits)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;
        sector_t end_block;
        int ret = 0;            /* Will call free_more_memory() */
        gfp_t gfp_mask;

        gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS;
        gfp_mask |= __GFP_MOVABLE;
        /*
         * XXX: __getblk_slow() can not really deal with failure and
         * will endlessly loop on improvised global reclaim.  Prefer
         * looping in the allocator rather than here, at least that
         * code knows what it's doing.
         */
        gfp_mask |= __GFP_NOFAIL;

        page = find_or_create_page(inode->i_mapping, index, gfp_mask);
        if (!page)
                return ret;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
                                                (sector_t)index << sizebits,
                                                size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
                        size);
        spin_unlock(&inode->i_mapping->private_lock);
done:
        ret = (block < end_block) ? 1 : -ENXIO;
failed:
        unlock_page(page);
        page_cache_release(page);
        return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.  (sizebits below is
 * log2 of the blocks-per-page count: e.g. 512-byte blocks on a 4k page give
 * sizebits == 3, so eight blocks share each page and index = block >> 3.)
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
                        __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }

        /* Create a page with the proper size buffers.. */
        return grow_dev_page(bdev, block, index, size, sizebits);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "logical block size: %d\n",
                                        bdev_logical_block_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head *bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        trace_block_dirty_buffer(bh);

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;
                if (!TestSetPageDirty(page)) {
                        struct address_space *mapping = page_mapping(page);
                        if (mapping)
                                __set_page_dirty(page, mapping, 0);
                }
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);

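/*
 * Illustrative sketch (not part of this file): the classic metadata update
 * pattern built on mark_buffer_dirty().  sb, block, offset, src and len are
 * assumptions for illustration:
 *
 *      struct buffer_head *bh = sb_bread(sb, block);
 *      if (!bh)
 *              return -EIO;
 *      lock_buffer(bh);
 *      memcpy(bh->b_data + offset, src, len);
 *      unlock_buffer(bh);
 *      mark_buffer_dirty(bh);  - writeback picks the buffer up later
 *      brelse(bh);
 *
 * For synchronous durability, sync_dirty_buffer(bh) may be called before
 * the final brelse().
 */
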
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

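/*
 * Illustrative sketch (not part of this file): __bread_slow() above is the
 * engine behind __bread()/sb_bread() further down.  A typical caller reads
 * one metadata block and drops its reference when done:
 *
 *      struct buffer_head *bh = __bread(bdev, block, size);
 *      if (!bh)
 *              return -EIO;    - the block was unreadable
 *      ... use bh->b_data ...
 *      brelse(bh);
 */
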
/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     16

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = NULL;

        check_irqs_on();
        bh_lru_lock();
        if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;

                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
                        struct buffer_head *bh2 =
                                __this_cpu_read(bh_lrus.bhs[in]);

                        if (bh2 == bh) {
                                __brelse(bh2);
                        } else {
                                if (out >= BH_LRU_SIZE) {
                                        BUG_ON(evictee != NULL);
                                        evictee = bh2;
                                } else {
                                        bhs[out++] = bh2;
                                }
                        }
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
                memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();

        if (evictee)
                __brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

                if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
                    bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        __this_cpu_write(bh_lrus.bhs[i],
                                                __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
                                __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                /* __find_get_block_slow will mark the page accessed */
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        } else
                touch_buffer(bh);

        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size);
        return bh;
}
EXPORT_SYMBOL(__getblk);

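/*
 * Illustrative sketch (not part of this file): __getblk() hands back the
 * buffer but promises nothing about its contents.  A caller that will
 * overwrite the whole block can skip any read:
 *
 *      struct buffer_head *bh = __getblk(bdev, block, size);
 *      lock_buffer(bh);
 *      memset(bh->b_data, 0, bh->b_size);
 *      set_buffer_uptodate(bh);
 *      unlock_buffer(bh);
 *      mark_buffer_dirty(bh);
 *      brelse(bh);
 *
 * A caller that needs the on-disk contents should use __bread() (below),
 * which performs the read only when the buffer is not already uptodate.
 */
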
/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
                ll_rw_block(READA, 1, &bh);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns the buffer head that contains it.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread);

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
        put_cpu_var(bh_lrus);
}

static bool has_bh_in_lru(int cpu, void *dummy)
{
        struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                if (b->bhs[i])
                        return 1;
        }

        return 0;
}

void invalidate_bh_lrus(void)
{
        on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
{
        bh->b_page = page;
        BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
                 */
                bh->b_data = (char *)(0 + offset);
        else
                bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
        (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
         1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
        unsigned long b_state, b_state_old;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        b_state = bh->b_state;
        for (;;) {
                b_state_old = cmpxchg(&bh->b_state, b_state,
                                      (b_state & ~BUFFER_FLAGS_DISCARD));
                if (b_state_old == b_state)
                        break;
                b_state = b_state_old;
        }
        unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
                          unsigned int length)
{
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        unsigned int stop = length + offset;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                goto out;

        /*
         * Check for overflow
         */
        BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);

        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                /*
                 * Are we still fully in range?
                 */
                if (next_off > stop)
                        goto out;

                /*
                 * is this block fully invalidated?
                 */
                if (offset <= curr_off)
                        discard_buffer(bh);
                curr_off = next_off;
                bh = next;
        } while (bh != head);

        /*
         * We release buffers only if the entire page is being invalidated.
         * The get_block cached value has been unconditionally invalidated,
         * so real IO is not possible anymore.
         */
        if (offset == 0)
                try_to_release_page(page, 0);
out:
        return;
}
EXPORT_SYMBOL(block_invalidatepage);


/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
                        unsigned long blocksize, unsigned long b_state)
{
        struct buffer_head *bh, *head, *tail;

        head = alloc_page_buffers(page, blocksize, 1);
        bh = head;
        do {
                bh->b_state |= b_state;
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;

        spin_lock(&page->mapping->private_lock);
        if (PageUptodate(page) || PageDirty(page)) {
                bh = head;
                do {
                        if (PageDirty(page))
                                set_buffer_dirty(bh);
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        attach_page_buffers(page, head);
        spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

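/*
 * Illustrative sketch (not part of this file): the common pattern in
 * readpage/writepage paths that need per-block state on a locked page,
 * mirrored by create_page_buffers() below:
 *
 *      if (!page_has_buffers(page))
 *              create_empty_buffers(page, blocksize, 0);
 *      head = page_buffers(page);
 *      ... walk the head->b_this_page ring, mapping blocks via get_block ...
 */
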
/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway.  We used to use
 * unmap_buffer() for such invalidation, but that was wrong.  We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
        struct buffer_head *old_bh;

        might_sleep();

        old_bh = __find_get_block_slow(bdev, block);
        if (old_bh) {
                clear_buffer_dirty(old_bh);
                wait_on_buffer(old_bh);
                clear_buffer_req(old_bh);
                __brelse(old_bh);
        }
}
EXPORT_SYMBOL(unmap_underlying_metadata);

/*
 * Size is a power-of-two in the range 512..PAGE_SIZE,
 * and the case we care about most is PAGE_SIZE.
 *
 * So this *could* possibly be written with those
 * constraints in mind (relevant mostly if some
 * architecture has a slow bit-scan instruction)
 */
static inline int block_size_bits(unsigned int blocksize)
{
        return ilog2(blocksize);
}

static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
{
        BUG_ON(!PageLocked(page));

        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
        return page_buffers(page);
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *      Mapped  Uptodate        Meaning
 *
 *      No      No              "unknown" - must do get_block()
 *      No      Yes             "hole" - zero-filled
 *      Yes     No              "allocated" - allocated on disk, not read in
 *      Yes     Yes             "valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc,
                        bh_end_io_t *handler)
{
        int err;
        sector_t block;
        sector_t last_block;
        struct buffer_head *bh, *head;
        unsigned int blocksize, bbits;
        int nr_underway = 0;
        int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
                        WRITE_SYNC : WRITE);

        head = create_page_buffers(page, inode,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));

        /*
         * Be very careful.  We have no exclusion from __set_page_dirty_buffers
         * here, and the (potentially unmapped) buffers may become dirty at
         * any time.  If a buffer becomes dirty here after we've inspected it
         * then we just miss that fact, and the page stays dirty.
         *
         * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
         * handle that here by just cleaning them.
         */

        bh = head;
        blocksize = bh->b_size;
        bbits = block_size_bits(blocksize);

        block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
        last_block = (i_size_read(inode) - 1) >> bbits;

        /*
         * Get all the dirty buffers mapped to disk addresses and
         * handle any aliases from the underlying blockdev's mapping.
         */
        do {
                if (block > last_block) {
                        /*
                         * mapped buffers outside i_size will occur, because
                         * this page can be outside i_size when there is a
                         * truncate in progress.
                         */
                        /*
                         * The buffer was zeroed by block_write_full_page()
                         */
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
                           buffer_dirty(bh)) {
                        WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
                        clear_buffer_delay(bh);
                        if (buffer_new(bh)) {
                                /* blockdev mappings never come here */
                                clear_buffer_new(bh);
                                unmap_underlying_metadata(bh->b_bdev,
                                                        bh->b_blocknr);
                        }
                }
                bh = bh->b_this_page;
                block++;
        } while (bh != head);

        do {
                if (!buffer_mapped(bh))
                        continue;
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page.  Note that this can
                 * potentially cause a busy-wait loop from writeback threads
                 * and kswapd activity, but those code paths have their own
                 * higher-level throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
                        mark_buffer_async_write_endio(bh, handler);
                } else {
                        unlock_buffer(bh);
                }
        } while ((bh = bh->b_this_page) != head);

        /*
         * The page and its buffers are protected by PageWriteback(), so we can
         * drop the bh refcounts early.
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(write_op, bh);
                        nr_underway++;
                }
                bh = next;
        } while (bh != head);
        unlock_page(page);

        err = 0;
done:
        if (nr_underway == 0) {
                /*
                 * The page was marked dirty, but the buffers were
                 * clean.  Someone wrote them back by hand with
                 * ll_rw_block/submit_bh.  A rare case.
                 */
                end_page_writeback(page);

                /*
                 * The page and buffer_heads can be released at any time from
                 * here on.
                 */
        }
        return err;

recover:
        /*
         * ENOSPC, or some other error.  We may already have added some
         * blocks to the file, so we need to write these out to avoid
         * exposing stale data.
         * The page is currently locked and not marked for writeback
         */
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
                if (buffer_mapped(bh) && buffer_dirty(bh) &&
                    !buffer_delay(bh)) {
                        lock_buffer(bh);
                        mark_buffer_async_write_endio(bh, handler);
                } else {
                        /*
                         * The buffer may have been set dirty during
                         * attachment to a dirty page.
                         */
                        clear_buffer_dirty(bh);
                }
        } while ((bh = bh->b_this_page) != head);
        SetPageError(page);
        BUG_ON(PageWriteback(page));
        mapping_set_error(page->mapping, err);
        set_page_writeback(page);
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
                        submit_bh(write_op, bh);
                        nr_underway++;
                }
                bh = next;
        } while (bh != head);
        unlock_page(page);
        goto done;
}

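/*
 * Illustrative sketch (not part of this file): filesystems reach the helper
 * above through block_write_full_page() (declared in buffer_head.h).
 * example_writepage and example_get_block are assumed names:
 *
 *      static int example_writepage(struct page *page,
 *                                   struct writeback_control *wbc)
 *      {
 *              return block_write_full_page(page, example_get_block, wbc);
 *      }
 */
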
/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
        unsigned int block_start, block_end;
        struct buffer_head *head, *bh;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        block_start = 0;
        do {
                block_end = block_start + bh->b_size;

                if (buffer_new(bh)) {
                        if (block_end > from && block_start < to) {
                                if (!PageUptodate(page)) {
                                        unsigned start, size;

                                        start = max(from, block_start);
                                        size = min(to, block_end) - start;

                                        zero_user(page, start, size);
                                        set_buffer_uptodate(bh);
                                }

                                clear_buffer_new(bh);
                                mark_buffer_dirty(bh);
                        }
                }

                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);

*wait_bh++=bh; 1961 } 1962 } 1963 /* 1964 * If we issued read requests - let them complete. 1965 */ 1966 while(wait_bh > wait) { 1967 wait_on_buffer(*--wait_bh); 1968 if (!buffer_uptodate(*wait_bh)) 1969 err = -EIO; 1970 } 1971 if (unlikely(err)) 1972 page_zero_new_buffers(page, from, to); 1973 return err; 1974 } 1975 EXPORT_SYMBOL(__block_write_begin); 1976 1977 static int __block_commit_write(struct inode *inode, struct page *page, 1978 unsigned from, unsigned to) 1979 { 1980 unsigned block_start, block_end; 1981 int partial = 0; 1982 unsigned blocksize; 1983 struct buffer_head *bh, *head; 1984 1985 bh = head = page_buffers(page); 1986 blocksize = bh->b_size; 1987 1988 block_start = 0; 1989 do { 1990 block_end = block_start + blocksize; 1991 if (block_end <= from || block_start >= to) { 1992 if (!buffer_uptodate(bh)) 1993 partial = 1; 1994 } else { 1995 set_buffer_uptodate(bh); 1996 mark_buffer_dirty(bh); 1997 } 1998 clear_buffer_new(bh); 1999 2000 block_start = block_end; 2001 bh = bh->b_this_page; 2002 } while (bh != head); 2003 2004 /* 2005 * If this is a partial write which happened to make all buffers 2006 * uptodate then we can optimize away a bogus readpage() for 2007 * the next read(). Here we 'discover' whether the page went 2008 * uptodate as a result of this (potentially partial) write. 2009 */ 2010 if (!partial) 2011 SetPageUptodate(page); 2012 return 0; 2013 } 2014 2015 /* 2016 * block_write_begin takes care of the basic task of block allocation and 2017 * bringing partial write blocks uptodate first. 2018 * 2019 * The filesystem needs to handle block truncation upon failure. 2020 */ 2021 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2022 unsigned flags, struct page **pagep, get_block_t *get_block) 2023 { 2024 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 2025 struct page *page; 2026 int status; 2027 2028 page = grab_cache_page_write_begin(mapping, index, flags); 2029 if (!page) 2030 return -ENOMEM; 2031 2032 status = __block_write_begin(page, pos, len, get_block); 2033 if (unlikely(status)) { 2034 unlock_page(page); 2035 page_cache_release(page); 2036 page = NULL; 2037 } 2038 2039 *pagep = page; 2040 return status; 2041 } 2042 EXPORT_SYMBOL(block_write_begin); 2043 2044 int block_write_end(struct file *file, struct address_space *mapping, 2045 loff_t pos, unsigned len, unsigned copied, 2046 struct page *page, void *fsdata) 2047 { 2048 struct inode *inode = mapping->host; 2049 unsigned start; 2050 2051 start = pos & (PAGE_CACHE_SIZE - 1); 2052 2053 if (unlikely(copied < len)) { 2054 /* 2055 * The buffers that were written will now be uptodate, so we 2056 * don't have to worry about a readpage reading them and 2057 * overwriting a partial write. However if we have encountered 2058 * a short write and only partially written into a buffer, it 2059 * will not be marked uptodate, so a readpage might come in and 2060 * destroy our partial write. 2061 * 2062 * Do the simplest thing, and just treat any short write to a 2063 * non uptodate page as a zero-length write, and force the 2064 * caller to redo the whole thing. 
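 *
 * Put differently: the bytes that did copy landed in a buffer that
 * cannot be marked uptodate, so a later readpage() could clobber
 * them; reporting a zero-length write makes the caller fault the
 * source page back in and redo the copy in full.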
2065 */ 2066 if (!PageUptodate(page)) 2067 copied = 0; 2068 2069 page_zero_new_buffers(page, start+copied, start+len); 2070 } 2071 flush_dcache_page(page); 2072 2073 /* This could be a short (even 0-length) commit */ 2074 __block_commit_write(inode, page, start, start+copied); 2075 2076 return copied; 2077 } 2078 EXPORT_SYMBOL(block_write_end); 2079 2080 int generic_write_end(struct file *file, struct address_space *mapping, 2081 loff_t pos, unsigned len, unsigned copied, 2082 struct page *page, void *fsdata) 2083 { 2084 struct inode *inode = mapping->host; 2085 int i_size_changed = 0; 2086 2087 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2088 2089 /* 2090 * No need to use i_size_read() here, the i_size 2091 * cannot change under us because we hold i_mutex. 2092 * 2093 * But it's important to update i_size while still holding page lock: 2094 * page writeout could otherwise come in and zero beyond i_size. 2095 */ 2096 if (pos+copied > inode->i_size) { 2097 i_size_write(inode, pos+copied); 2098 i_size_changed = 1; 2099 } 2100 2101 unlock_page(page); 2102 page_cache_release(page); 2103 2104 /* 2105 * Don't mark the inode dirty under page lock. First, it unnecessarily 2106 * makes the holding time of page lock longer. Second, it forces lock 2107 * ordering of page lock and transaction start for journaling 2108 * filesystems. 2109 */ 2110 if (i_size_changed) 2111 mark_inode_dirty(inode); 2112 2113 return copied; 2114 } 2115 EXPORT_SYMBOL(generic_write_end); 2116 2117 /* 2118 * block_is_partially_uptodate checks whether buffers within a page are 2119 * uptodate or not. 2120 * 2121 * Returns true if all buffers which correspond to a file portion 2122 * we want to read are uptodate. 2123 */ 2124 int block_is_partially_uptodate(struct page *page, unsigned long from, 2125 unsigned long count) 2126 { 2127 unsigned block_start, block_end, blocksize; 2128 unsigned to; 2129 struct buffer_head *bh, *head; 2130 int ret = 1; 2131 2132 if (!page_has_buffers(page)) 2133 return 0; 2134 2135 head = page_buffers(page); 2136 blocksize = head->b_size; 2137 to = min_t(unsigned, PAGE_CACHE_SIZE - from, count); 2138 to = from + to; 2139 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) 2140 return 0; 2141 2142 bh = head; 2143 block_start = 0; 2144 do { 2145 block_end = block_start + blocksize; 2146 if (block_end > from && block_start < to) { 2147 if (!buffer_uptodate(bh)) { 2148 ret = 0; 2149 break; 2150 } 2151 if (block_end >= to) 2152 break; 2153 } 2154 block_start = block_end; 2155 bh = bh->b_this_page; 2156 } while (bh != head); 2157 2158 return ret; 2159 } 2160 EXPORT_SYMBOL(block_is_partially_uptodate); 2161 2162 /* 2163 * Generic "read page" function for block devices that have the normal 2164 * get_block functionality. This is most of the block device filesystems. 2165 * Reads the page asynchronously --- the unlock_buffer() and 2166 * set/clear_buffer_uptodate() functions propagate buffer state into the 2167 * page struct once IO has completed. 
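 *
 * A filesystem normally reaches this through its address_space
 * operations; a minimal sketch (myfs_get_block is hypothetical):
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}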
2168 */ 2169 int block_read_full_page(struct page *page, get_block_t *get_block) 2170 { 2171 struct inode *inode = page->mapping->host; 2172 sector_t iblock, lblock; 2173 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 2174 unsigned int blocksize, bbits; 2175 int nr, i; 2176 int fully_mapped = 1; 2177 2178 head = create_page_buffers(page, inode, 0); 2179 blocksize = head->b_size; 2180 bbits = block_size_bits(blocksize); 2181 2182 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 2183 lblock = (i_size_read(inode)+blocksize-1) >> bbits; 2184 bh = head; 2185 nr = 0; 2186 i = 0; 2187 2188 do { 2189 if (buffer_uptodate(bh)) 2190 continue; 2191 2192 if (!buffer_mapped(bh)) { 2193 int err = 0; 2194 2195 fully_mapped = 0; 2196 if (iblock < lblock) { 2197 WARN_ON(bh->b_size != blocksize); 2198 err = get_block(inode, iblock, bh, 0); 2199 if (err) 2200 SetPageError(page); 2201 } 2202 if (!buffer_mapped(bh)) { 2203 zero_user(page, i * blocksize, blocksize); 2204 if (!err) 2205 set_buffer_uptodate(bh); 2206 continue; 2207 } 2208 /* 2209 * get_block() might have updated the buffer 2210 * synchronously 2211 */ 2212 if (buffer_uptodate(bh)) 2213 continue; 2214 } 2215 arr[nr++] = bh; 2216 } while (i++, iblock++, (bh = bh->b_this_page) != head); 2217 2218 if (fully_mapped) 2219 SetPageMappedToDisk(page); 2220 2221 if (!nr) { 2222 /* 2223 * All buffers are uptodate - we can set the page uptodate 2224 * as well. But not if get_block() returned an error. 2225 */ 2226 if (!PageError(page)) 2227 SetPageUptodate(page); 2228 unlock_page(page); 2229 return 0; 2230 } 2231 2232 /* Stage two: lock the buffers */ 2233 for (i = 0; i < nr; i++) { 2234 bh = arr[i]; 2235 lock_buffer(bh); 2236 mark_buffer_async_read(bh); 2237 } 2238 2239 /* 2240 * Stage 3: start the IO. Check for uptodateness 2241 * inside the buffer lock in case another process reading 2242 * the underlying blockdev brought it uptodate (the sct fix). 2243 */ 2244 for (i = 0; i < nr; i++) { 2245 bh = arr[i]; 2246 if (buffer_uptodate(bh)) 2247 end_buffer_async_read(bh, 1); 2248 else 2249 submit_bh(READ, bh); 2250 } 2251 return 0; 2252 } 2253 EXPORT_SYMBOL(block_read_full_page); 2254 2255 /* utility function for filesystems that need to do work on expanding 2256 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2257 * deal with the hole. 
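 *
 * Sketch of the expanding-truncate case in a filesystem's ->setattr
 * (names hypothetical):
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}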
2258 */
2259 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2260 {
2261 	struct address_space *mapping = inode->i_mapping;
2262 	struct page *page;
2263 	void *fsdata;
2264 	int err;
2265
2266 	err = inode_newsize_ok(inode, size);
2267 	if (err)
2268 		goto out;
2269
2270 	err = pagecache_write_begin(NULL, mapping, size, 0,
2271 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2272 				&page, &fsdata);
2273 	if (err)
2274 		goto out;
2275
2276 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2277 	BUG_ON(err > 0);
2278
2279 out:
2280 	return err;
2281 }
2282 EXPORT_SYMBOL(generic_cont_expand_simple);
2283
2284 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2285 			loff_t pos, loff_t *bytes)
2286 {
2287 	struct inode *inode = mapping->host;
2288 	unsigned blocksize = 1 << inode->i_blkbits;
2289 	struct page *page;
2290 	void *fsdata;
2291 	pgoff_t index, curidx;
2292 	loff_t curpos;
2293 	unsigned zerofrom, offset, len;
2294 	int err = 0;
2295
2296 	index = pos >> PAGE_CACHE_SHIFT;
2297 	offset = pos & ~PAGE_CACHE_MASK;
2298
2299 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2300 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2301 		if (zerofrom & (blocksize-1)) {
2302 			*bytes |= (blocksize-1);
2303 			(*bytes)++;
2304 		}
2305 		len = PAGE_CACHE_SIZE - zerofrom;
2306
2307 		err = pagecache_write_begin(file, mapping, curpos, len,
2308 						AOP_FLAG_UNINTERRUPTIBLE,
2309 						&page, &fsdata);
2310 		if (err)
2311 			goto out;
2312 		zero_user(page, zerofrom, len);
2313 		err = pagecache_write_end(file, mapping, curpos, len, len,
2314 						page, fsdata);
2315 		if (err < 0)
2316 			goto out;
2317 		BUG_ON(err != len);
2318 		err = 0;
2319
2320 		balance_dirty_pages_ratelimited(mapping);
2321
2322 		if (unlikely(fatal_signal_pending(current))) {
2323 			err = -EINTR;
2324 			goto out;
2325 		}
2326 	}
2327
2328 	/* page covers the boundary, find the boundary offset */
2329 	if (index == curidx) {
2330 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2331 		/* if we are expanding the file, the last block will be filled */
2332 		if (offset <= zerofrom) {
2333 			goto out;
2334 		}
2335 		if (zerofrom & (blocksize-1)) {
2336 			*bytes |= (blocksize-1);
2337 			(*bytes)++;
2338 		}
2339 		len = offset - zerofrom;
2340
2341 		err = pagecache_write_begin(file, mapping, curpos, len,
2342 						AOP_FLAG_UNINTERRUPTIBLE,
2343 						&page, &fsdata);
2344 		if (err)
2345 			goto out;
2346 		zero_user(page, zerofrom, len);
2347 		err = pagecache_write_end(file, mapping, curpos, len, len,
2348 						page, fsdata);
2349 		if (err < 0)
2350 			goto out;
2351 		BUG_ON(err != len);
2352 		err = 0;
2353 	}
2354 out:
2355 	return err;
2356 }
2357
2358 /*
2359  * For moronic filesystems that do not allow holes in files.
2360  * We may have to extend the file.
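 *
 * A caller sketch, modelled on how FAT-style filesystems use this
 * (the myfs names are hypothetical; the loff_t behind @bytes tracks
 * the highest byte the filesystem has instantiated so far):
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *				pagep, fsdata, myfs_get_block,
 *				&MYFS_I(mapping->host)->mmu_private);
 *	}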
2361 */ 2362 int cont_write_begin(struct file *file, struct address_space *mapping, 2363 loff_t pos, unsigned len, unsigned flags, 2364 struct page **pagep, void **fsdata, 2365 get_block_t *get_block, loff_t *bytes) 2366 { 2367 struct inode *inode = mapping->host; 2368 unsigned blocksize = 1 << inode->i_blkbits; 2369 unsigned zerofrom; 2370 int err; 2371 2372 err = cont_expand_zero(file, mapping, pos, bytes); 2373 if (err) 2374 return err; 2375 2376 zerofrom = *bytes & ~PAGE_CACHE_MASK; 2377 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2378 *bytes |= (blocksize-1); 2379 (*bytes)++; 2380 } 2381 2382 return block_write_begin(mapping, pos, len, flags, pagep, get_block); 2383 } 2384 EXPORT_SYMBOL(cont_write_begin); 2385 2386 int block_commit_write(struct page *page, unsigned from, unsigned to) 2387 { 2388 struct inode *inode = page->mapping->host; 2389 __block_commit_write(inode,page,from,to); 2390 return 0; 2391 } 2392 EXPORT_SYMBOL(block_commit_write); 2393 2394 /* 2395 * block_page_mkwrite() is not allowed to change the file size as it gets 2396 * called from a page fault handler when a page is first dirtied. Hence we must 2397 * be careful to check for EOF conditions here. We set the page up correctly 2398 * for a written page which means we get ENOSPC checking when writing into 2399 * holes and correct delalloc and unwritten extent mapping on filesystems that 2400 * support these features. 2401 * 2402 * We are not allowed to take the i_mutex here so we have to play games to 2403 * protect against truncate races as the page could now be beyond EOF. Because 2404 * truncate writes the inode size before removing pages, once we have the 2405 * page lock we can determine safely if the page is beyond EOF. If it is not 2406 * beyond EOF, then the page is guaranteed safe against truncation until we 2407 * unlock the page. 2408 * 2409 * Direct callers of this function should protect against filesystem freezing 2410 * using sb_start_write() - sb_end_write() functions. 2411 */ 2412 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2413 get_block_t get_block) 2414 { 2415 struct page *page = vmf->page; 2416 struct inode *inode = file_inode(vma->vm_file); 2417 unsigned long end; 2418 loff_t size; 2419 int ret; 2420 2421 lock_page(page); 2422 size = i_size_read(inode); 2423 if ((page->mapping != inode->i_mapping) || 2424 (page_offset(page) > size)) { 2425 /* We overload EFAULT to mean page got truncated */ 2426 ret = -EFAULT; 2427 goto out_unlock; 2428 } 2429 2430 /* page is wholly or partially inside EOF */ 2431 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) 2432 end = size & ~PAGE_CACHE_MASK; 2433 else 2434 end = PAGE_CACHE_SIZE; 2435 2436 ret = __block_write_begin(page, 0, end, get_block); 2437 if (!ret) 2438 ret = block_commit_write(page, 0, end); 2439 2440 if (unlikely(ret < 0)) 2441 goto out_unlock; 2442 set_page_dirty(page); 2443 wait_for_stable_page(page); 2444 return 0; 2445 out_unlock: 2446 unlock_page(page); 2447 return ret; 2448 } 2449 EXPORT_SYMBOL(__block_page_mkwrite); 2450 2451 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2452 get_block_t get_block) 2453 { 2454 int ret; 2455 struct super_block *sb = file_inode(vma->vm_file)->i_sb; 2456 2457 sb_start_pagefault(sb); 2458 2459 /* 2460 * Update file times before taking page lock. We may end up failing the 2461 * fault so this update may be superfluous but who really cares... 
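 * (Doing it before lock_page() also avoids dirtying the inode, and
 * hence possibly starting a journal transaction, while the page lock
 * is held - the same ordering concern documented above for
 * generic_write_end().)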
2462 */ 2463 file_update_time(vma->vm_file); 2464 2465 ret = __block_page_mkwrite(vma, vmf, get_block); 2466 sb_end_pagefault(sb); 2467 return block_page_mkwrite_return(ret); 2468 } 2469 EXPORT_SYMBOL(block_page_mkwrite); 2470 2471 /* 2472 * nobh_write_begin()'s prereads are special: the buffer_heads are freed 2473 * immediately, while under the page lock. So it needs a special end_io 2474 * handler which does not touch the bh after unlocking it. 2475 */ 2476 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) 2477 { 2478 __end_buffer_read_notouch(bh, uptodate); 2479 } 2480 2481 /* 2482 * Attach the singly-linked list of buffers created by nobh_write_begin, to 2483 * the page (converting it to circular linked list and taking care of page 2484 * dirty races). 2485 */ 2486 static void attach_nobh_buffers(struct page *page, struct buffer_head *head) 2487 { 2488 struct buffer_head *bh; 2489 2490 BUG_ON(!PageLocked(page)); 2491 2492 spin_lock(&page->mapping->private_lock); 2493 bh = head; 2494 do { 2495 if (PageDirty(page)) 2496 set_buffer_dirty(bh); 2497 if (!bh->b_this_page) 2498 bh->b_this_page = head; 2499 bh = bh->b_this_page; 2500 } while (bh != head); 2501 attach_page_buffers(page, head); 2502 spin_unlock(&page->mapping->private_lock); 2503 } 2504 2505 /* 2506 * On entry, the page is fully not uptodate. 2507 * On exit the page is fully uptodate in the areas outside (from,to) 2508 * The filesystem needs to handle block truncation upon failure. 2509 */ 2510 int nobh_write_begin(struct address_space *mapping, 2511 loff_t pos, unsigned len, unsigned flags, 2512 struct page **pagep, void **fsdata, 2513 get_block_t *get_block) 2514 { 2515 struct inode *inode = mapping->host; 2516 const unsigned blkbits = inode->i_blkbits; 2517 const unsigned blocksize = 1 << blkbits; 2518 struct buffer_head *head, *bh; 2519 struct page *page; 2520 pgoff_t index; 2521 unsigned from, to; 2522 unsigned block_in_page; 2523 unsigned block_start, block_end; 2524 sector_t block_in_file; 2525 int nr_reads = 0; 2526 int ret = 0; 2527 int is_mapped_to_disk = 1; 2528 2529 index = pos >> PAGE_CACHE_SHIFT; 2530 from = pos & (PAGE_CACHE_SIZE - 1); 2531 to = from + len; 2532 2533 page = grab_cache_page_write_begin(mapping, index, flags); 2534 if (!page) 2535 return -ENOMEM; 2536 *pagep = page; 2537 *fsdata = NULL; 2538 2539 if (page_has_buffers(page)) { 2540 ret = __block_write_begin(page, pos, len, get_block); 2541 if (unlikely(ret)) 2542 goto out_release; 2543 return ret; 2544 } 2545 2546 if (PageMappedToDisk(page)) 2547 return 0; 2548 2549 /* 2550 * Allocate buffers so that we can keep track of state, and potentially 2551 * attach them to the page if an error occurs. In the common case of 2552 * no error, they will just be freed again without ever being attached 2553 * to the page (which is all OK, because we're under the page lock). 2554 * 2555 * Be careful: the buffer linked list is a NULL terminated one, rather 2556 * than the circular one we're used to. 2557 */ 2558 head = alloc_page_buffers(page, blocksize, 0); 2559 if (!head) { 2560 ret = -ENOMEM; 2561 goto out_release; 2562 } 2563 2564 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 2565 2566 /* 2567 * We loop across all blocks in the page, whether or not they are 2568 * part of the affected region. This is so we can discover if the 2569 * page is fully mapped-to-disk. 
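 * If every block turns out to be mapped, PageMappedToDisk is set
 * further down and later writes to this page can take the early
 * return above without calling get_block() per block.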
2570 */
2571 	for (block_start = 0, block_in_page = 0, bh = head;
2572 	     block_start < PAGE_CACHE_SIZE;
2573 	     block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2574 		int create;
2575
2576 		block_end = block_start + blocksize;
2577 		bh->b_state = 0;
2578 		create = 1;
2579 		if (block_start >= to)
2580 			create = 0;
2581 		ret = get_block(inode, block_in_file + block_in_page,
2582 					bh, create);
2583 		if (ret)
2584 			goto failed;
2585 		if (!buffer_mapped(bh))
2586 			is_mapped_to_disk = 0;
2587 		if (buffer_new(bh))
2588 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2589 		if (PageUptodate(page)) {
2590 			set_buffer_uptodate(bh);
2591 			continue;
2592 		}
2593 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2594 			zero_user_segments(page, block_start, from,
2595 							to, block_end);
2596 			continue;
2597 		}
2598 		if (buffer_uptodate(bh))
2599 			continue;	/* reiserfs does this */
2600 		if (block_start < from || block_end > to) {
2601 			lock_buffer(bh);
2602 			bh->b_end_io = end_buffer_read_nobh;
2603 			submit_bh(READ, bh);
2604 			nr_reads++;
2605 		}
2606 	}
2607
2608 	if (nr_reads) {
2609 		/*
2610 		 * The page is locked, so these buffers are protected from
2611 		 * any VM or truncate activity. Hence we don't need to care
2612 		 * for the buffer_head refcounts.
2613 		 */
2614 		for (bh = head; bh; bh = bh->b_this_page) {
2615 			wait_on_buffer(bh);
2616 			if (!buffer_uptodate(bh))
2617 				ret = -EIO;
2618 		}
2619 		if (ret)
2620 			goto failed;
2621 	}
2622
2623 	if (is_mapped_to_disk)
2624 		SetPageMappedToDisk(page);
2625
2626 	*fsdata = head; /* to be released by nobh_write_end */
2627
2628 	return 0;
2629
2630 failed:
2631 	BUG_ON(!ret);
2632 	/*
2633 	 * Error recovery is a bit difficult. We need to zero out blocks that
2634 	 * were newly allocated, and dirty them to ensure they get written out.
2635 	 * Buffers need to be attached to the page at this point, otherwise
2636 	 * the handling of potential IO errors during writeout would be hard
2637 	 * (could try doing synchronous writeout, but what if that fails too?)
2638 	 */
2639 	attach_nobh_buffers(page, head);
2640 	page_zero_new_buffers(page, from, to);
2641
2642 out_release:
2643 	unlock_page(page);
2644 	page_cache_release(page);
2645 	*pagep = NULL;
2646
2647 	return ret;
2648 }
2649 EXPORT_SYMBOL(nobh_write_begin);
2650
2651 int nobh_write_end(struct file *file, struct address_space *mapping,
2652 			loff_t pos, unsigned len, unsigned copied,
2653 			struct page *page, void *fsdata)
2654 {
2655 	struct inode *inode = page->mapping->host;
2656 	struct buffer_head *head = fsdata;
2657 	struct buffer_head *bh;
2658 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2659
2660 	if (unlikely(copied < len) && head)
2661 		attach_nobh_buffers(page, head);
2662 	if (page_has_buffers(page))
2663 		return generic_write_end(file, mapping, pos, len,
2664 					copied, page, fsdata);
2665
2666 	SetPageUptodate(page);
2667 	set_page_dirty(page);
2668 	if (pos+copied > inode->i_size) {
2669 		i_size_write(inode, pos+copied);
2670 		mark_inode_dirty(inode);
2671 	}
2672
2673 	unlock_page(page);
2674 	page_cache_release(page);
2675
2676 	while (head) {
2677 		bh = head;
2678 		head = head->b_this_page;
2679 		free_buffer_head(bh);
2680 	}
2681
2682 	return copied;
2683 }
2684 EXPORT_SYMBOL(nobh_write_end);
2685
2686 /*
2687  * nobh_writepage() - based on block_write_full_page() except
2688  * that it tries to operate without attaching bufferheads to
2689  * the page.
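 *
 * A minimal wiring sketch (myfs_get_block is hypothetical):
 *
 *	static int myfs_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return nobh_writepage(page, myfs_get_block, wbc);
 *	}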
2690 */ 2691 int nobh_writepage(struct page *page, get_block_t *get_block, 2692 struct writeback_control *wbc) 2693 { 2694 struct inode * const inode = page->mapping->host; 2695 loff_t i_size = i_size_read(inode); 2696 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2697 unsigned offset; 2698 int ret; 2699 2700 /* Is the page fully inside i_size? */ 2701 if (page->index < end_index) 2702 goto out; 2703 2704 /* Is the page fully outside i_size? (truncate in progress) */ 2705 offset = i_size & (PAGE_CACHE_SIZE-1); 2706 if (page->index >= end_index+1 || !offset) { 2707 /* 2708 * The page may have dirty, unmapped buffers. For example, 2709 * they may have been added in ext3_writepage(). Make them 2710 * freeable here, so the page does not leak. 2711 */ 2712 #if 0 2713 /* Not really sure about this - do we need this ? */ 2714 if (page->mapping->a_ops->invalidatepage) 2715 page->mapping->a_ops->invalidatepage(page, offset); 2716 #endif 2717 unlock_page(page); 2718 return 0; /* don't care */ 2719 } 2720 2721 /* 2722 * The page straddles i_size. It must be zeroed out on each and every 2723 * writepage invocation because it may be mmapped. "A file is mapped 2724 * in multiples of the page size. For a file that is not a multiple of 2725 * the page size, the remaining memory is zeroed when mapped, and 2726 * writes to that region are not written out to the file." 2727 */ 2728 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2729 out: 2730 ret = mpage_writepage(page, get_block, wbc); 2731 if (ret == -EAGAIN) 2732 ret = __block_write_full_page(inode, page, get_block, wbc, 2733 end_buffer_async_write); 2734 return ret; 2735 } 2736 EXPORT_SYMBOL(nobh_writepage); 2737 2738 int nobh_truncate_page(struct address_space *mapping, 2739 loff_t from, get_block_t *get_block) 2740 { 2741 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2742 unsigned offset = from & (PAGE_CACHE_SIZE-1); 2743 unsigned blocksize; 2744 sector_t iblock; 2745 unsigned length, pos; 2746 struct inode *inode = mapping->host; 2747 struct page *page; 2748 struct buffer_head map_bh; 2749 int err; 2750 2751 blocksize = 1 << inode->i_blkbits; 2752 length = offset & (blocksize - 1); 2753 2754 /* Block boundary? Nothing to do */ 2755 if (!length) 2756 return 0; 2757 2758 length = blocksize - length; 2759 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2760 2761 page = grab_cache_page(mapping, index); 2762 err = -ENOMEM; 2763 if (!page) 2764 goto out; 2765 2766 if (page_has_buffers(page)) { 2767 has_buffers: 2768 unlock_page(page); 2769 page_cache_release(page); 2770 return block_truncate_page(mapping, from, get_block); 2771 } 2772 2773 /* Find the buffer that contains "offset" */ 2774 pos = blocksize; 2775 while (offset >= pos) { 2776 iblock++; 2777 pos += blocksize; 2778 } 2779 2780 map_bh.b_size = blocksize; 2781 map_bh.b_state = 0; 2782 err = get_block(inode, iblock, &map_bh, 0); 2783 if (err) 2784 goto unlock; 2785 /* unmapped? It's a hole - nothing to do */ 2786 if (!buffer_mapped(&map_bh)) 2787 goto unlock; 2788 2789 /* Ok, it's mapped. 
Make sure it's up-to-date */ 2790 if (!PageUptodate(page)) { 2791 err = mapping->a_ops->readpage(NULL, page); 2792 if (err) { 2793 page_cache_release(page); 2794 goto out; 2795 } 2796 lock_page(page); 2797 if (!PageUptodate(page)) { 2798 err = -EIO; 2799 goto unlock; 2800 } 2801 if (page_has_buffers(page)) 2802 goto has_buffers; 2803 } 2804 zero_user(page, offset, length); 2805 set_page_dirty(page); 2806 err = 0; 2807 2808 unlock: 2809 unlock_page(page); 2810 page_cache_release(page); 2811 out: 2812 return err; 2813 } 2814 EXPORT_SYMBOL(nobh_truncate_page); 2815 2816 int block_truncate_page(struct address_space *mapping, 2817 loff_t from, get_block_t *get_block) 2818 { 2819 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2820 unsigned offset = from & (PAGE_CACHE_SIZE-1); 2821 unsigned blocksize; 2822 sector_t iblock; 2823 unsigned length, pos; 2824 struct inode *inode = mapping->host; 2825 struct page *page; 2826 struct buffer_head *bh; 2827 int err; 2828 2829 blocksize = 1 << inode->i_blkbits; 2830 length = offset & (blocksize - 1); 2831 2832 /* Block boundary? Nothing to do */ 2833 if (!length) 2834 return 0; 2835 2836 length = blocksize - length; 2837 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2838 2839 page = grab_cache_page(mapping, index); 2840 err = -ENOMEM; 2841 if (!page) 2842 goto out; 2843 2844 if (!page_has_buffers(page)) 2845 create_empty_buffers(page, blocksize, 0); 2846 2847 /* Find the buffer that contains "offset" */ 2848 bh = page_buffers(page); 2849 pos = blocksize; 2850 while (offset >= pos) { 2851 bh = bh->b_this_page; 2852 iblock++; 2853 pos += blocksize; 2854 } 2855 2856 err = 0; 2857 if (!buffer_mapped(bh)) { 2858 WARN_ON(bh->b_size != blocksize); 2859 err = get_block(inode, iblock, bh, 0); 2860 if (err) 2861 goto unlock; 2862 /* unmapped? It's a hole - nothing to do */ 2863 if (!buffer_mapped(bh)) 2864 goto unlock; 2865 } 2866 2867 /* Ok, it's mapped. Make sure it's up-to-date */ 2868 if (PageUptodate(page)) 2869 set_buffer_uptodate(bh); 2870 2871 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2872 err = -EIO; 2873 ll_rw_block(READ, 1, &bh); 2874 wait_on_buffer(bh); 2875 /* Uhhuh. Read error. Complain and punt. */ 2876 if (!buffer_uptodate(bh)) 2877 goto unlock; 2878 } 2879 2880 zero_user(page, offset, length); 2881 mark_buffer_dirty(bh); 2882 err = 0; 2883 2884 unlock: 2885 unlock_page(page); 2886 page_cache_release(page); 2887 out: 2888 return err; 2889 } 2890 EXPORT_SYMBOL(block_truncate_page); 2891 2892 /* 2893 * The generic ->writepage function for buffer-backed address_spaces 2894 */ 2895 int block_write_full_page(struct page *page, get_block_t *get_block, 2896 struct writeback_control *wbc) 2897 { 2898 struct inode * const inode = page->mapping->host; 2899 loff_t i_size = i_size_read(inode); 2900 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2901 unsigned offset; 2902 2903 /* Is the page fully inside i_size? */ 2904 if (page->index < end_index) 2905 return __block_write_full_page(inode, page, get_block, wbc, 2906 end_buffer_async_write); 2907 2908 /* Is the page fully outside i_size? (truncate in progress) */ 2909 offset = i_size & (PAGE_CACHE_SIZE-1); 2910 if (page->index >= end_index+1 || !offset) { 2911 /* 2912 * The page may have dirty, unmapped buffers. For example, 2913 * they may have been added in ext3_writepage(). Make them 2914 * freeable here, so the page does not leak. 
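 * do_invalidatepage() below strips those buffers off the page for us.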
2915 */ 2916 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); 2917 unlock_page(page); 2918 return 0; /* don't care */ 2919 } 2920 2921 /* 2922 * The page straddles i_size. It must be zeroed out on each and every 2923 * writepage invocation because it may be mmapped. "A file is mapped 2924 * in multiples of the page size. For a file that is not a multiple of 2925 * the page size, the remaining memory is zeroed when mapped, and 2926 * writes to that region are not written out to the file." 2927 */ 2928 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2929 return __block_write_full_page(inode, page, get_block, wbc, 2930 end_buffer_async_write); 2931 } 2932 EXPORT_SYMBOL(block_write_full_page); 2933 2934 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2935 get_block_t *get_block) 2936 { 2937 struct buffer_head tmp; 2938 struct inode *inode = mapping->host; 2939 tmp.b_state = 0; 2940 tmp.b_blocknr = 0; 2941 tmp.b_size = 1 << inode->i_blkbits; 2942 get_block(inode, block, &tmp, 0); 2943 return tmp.b_blocknr; 2944 } 2945 EXPORT_SYMBOL(generic_block_bmap); 2946 2947 static void end_bio_bh_io_sync(struct bio *bio, int err) 2948 { 2949 struct buffer_head *bh = bio->bi_private; 2950 2951 if (err == -EOPNOTSUPP) { 2952 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 2953 } 2954 2955 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags))) 2956 set_bit(BH_Quiet, &bh->b_state); 2957 2958 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); 2959 bio_put(bio); 2960 } 2961 2962 /* 2963 * This allows us to do IO even on the odd last sectors 2964 * of a device, even if the block size is some multiple 2965 * of the physical sector size. 2966 * 2967 * We'll just truncate the bio to the size of the device, 2968 * and clear the end of the buffer head manually. 2969 * 2970 * Truly out-of-range accesses will turn into actual IO 2971 * errors, this only handles the "we need to be able to 2972 * do IO at the final sector" case. 2973 */ 2974 void guard_bio_eod(int rw, struct bio *bio) 2975 { 2976 sector_t maxsector; 2977 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; 2978 unsigned truncated_bytes; 2979 2980 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 2981 if (!maxsector) 2982 return; 2983 2984 /* 2985 * If the *whole* IO is past the end of the device, 2986 * let it through, and the IO layer will turn it into 2987 * an EIO. 2988 */ 2989 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) 2990 return; 2991 2992 maxsector -= bio->bi_iter.bi_sector; 2993 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) 2994 return; 2995 2996 /* Uhhuh. We've got a bio that straddles the device size! */ 2997 truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); 2998 2999 /* Truncate the bio.. 
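 * (bi_size and the final bvec length must shrink together; for
 * reads the truncated tail is also zeroed out below)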
*/
3000 	bio->bi_iter.bi_size -= truncated_bytes;
3001 	bvec->bv_len -= truncated_bytes;
3002
3003 	/* ..and clear the end of the buffer for reads */
3004 	if ((rw & RW_MASK) == READ) {
3005 		zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
3006 				truncated_bytes);
3007 	}
3008 }
3009
3010 int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
3011 {
3012 	struct bio *bio;
3013 	int ret = 0;
3014
3015 	BUG_ON(!buffer_locked(bh));
3016 	BUG_ON(!buffer_mapped(bh));
3017 	BUG_ON(!bh->b_end_io);
3018 	BUG_ON(buffer_delay(bh));
3019 	BUG_ON(buffer_unwritten(bh));
3020
3021 	/*
3022 	 * Only clear out a write error when rewriting
3023 	 */
3024 	if (test_set_buffer_req(bh) && (rw & WRITE))
3025 		clear_buffer_write_io_error(bh);
3026
3027 	/*
3028 	 * from here on down, it's all bio -- do the initial mapping,
3029 	 * submit_bio -> generic_make_request may further map this bio around
3030 	 */
3031 	bio = bio_alloc(GFP_NOIO, 1);
3032
3033 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3034 	bio->bi_bdev = bh->b_bdev;
3035 	bio->bi_io_vec[0].bv_page = bh->b_page;
3036 	bio->bi_io_vec[0].bv_len = bh->b_size;
3037 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
3038
3039 	bio->bi_vcnt = 1;
3040 	bio->bi_iter.bi_size = bh->b_size;
3041
3042 	bio->bi_end_io = end_bio_bh_io_sync;
3043 	bio->bi_private = bh;
3044 	bio->bi_flags |= bio_flags;
3045
3046 	/* Take care of bh's that straddle the end of the device */
3047 	guard_bio_eod(rw, bio);
3048
3049 	if (buffer_meta(bh))
3050 		rw |= REQ_META;
3051 	if (buffer_prio(bh))
3052 		rw |= REQ_PRIO;
3053
3054 	bio_get(bio);
3055 	submit_bio(rw, bio);
3056
3057 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
3058 		ret = -EOPNOTSUPP;
3059
3060 	bio_put(bio);
3061 	return ret;
3062 }
3063 EXPORT_SYMBOL_GPL(_submit_bh);
3064
3065 int submit_bh(int rw, struct buffer_head *bh)
3066 {
3067 	return _submit_bh(rw, bh, 0);
3068 }
3069 EXPORT_SYMBOL(submit_bh);
3070
3071 /**
3072  * ll_rw_block: low-level access to block devices (DEPRECATED)
3073  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
3074  * @nr: number of &struct buffer_heads in the array
3075  * @bhs: array of pointers to &struct buffer_head
3076  *
3077  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3078  * requests an I/O operation on them, either a %READ or a %WRITE. The third
3079  * %READA option is described in the documentation for generic_make_request()
3080  * which ll_rw_block() calls.
3081  *
3082  * This function drops any buffer that it cannot get a lock on (with the
3083  * BH_Lock state bit), any buffer that appears to be clean when doing a write
3084  * request, and any buffer that appears to be up-to-date when doing a read
3085  * request. Further, it marks as clean any buffers that are processed for
3086  * writing (the buffer cache won't assume that they are actually clean
3087  * until the buffer gets unlocked).
3088  *
3089  * ll_rw_block sets b_end_io to a simple completion handler that marks
3090  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3091  * any waiters.
3092  *
3093  * All of the buffers must be for the same device, and their size must be a
3094  * multiple of the current approved size for the device.
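 *
 * Example (a sketch; bh1 and bh2 stand for buffers the caller already
 * holds references on): start reads on both, then wait for the first:
 *
 *	struct buffer_head *bhs[2] = { bh1, bh2 };
 *
 *	ll_rw_block(READ, 2, bhs);
 *	wait_on_buffer(bhs[0]);
 *	if (!buffer_uptodate(bhs[0]))
 *		return -EIO;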
3095 */ 3096 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) 3097 { 3098 int i; 3099 3100 for (i = 0; i < nr; i++) { 3101 struct buffer_head *bh = bhs[i]; 3102 3103 if (!trylock_buffer(bh)) 3104 continue; 3105 if (rw == WRITE) { 3106 if (test_clear_buffer_dirty(bh)) { 3107 bh->b_end_io = end_buffer_write_sync; 3108 get_bh(bh); 3109 submit_bh(WRITE, bh); 3110 continue; 3111 } 3112 } else { 3113 if (!buffer_uptodate(bh)) { 3114 bh->b_end_io = end_buffer_read_sync; 3115 get_bh(bh); 3116 submit_bh(rw, bh); 3117 continue; 3118 } 3119 } 3120 unlock_buffer(bh); 3121 } 3122 } 3123 EXPORT_SYMBOL(ll_rw_block); 3124 3125 void write_dirty_buffer(struct buffer_head *bh, int rw) 3126 { 3127 lock_buffer(bh); 3128 if (!test_clear_buffer_dirty(bh)) { 3129 unlock_buffer(bh); 3130 return; 3131 } 3132 bh->b_end_io = end_buffer_write_sync; 3133 get_bh(bh); 3134 submit_bh(rw, bh); 3135 } 3136 EXPORT_SYMBOL(write_dirty_buffer); 3137 3138 /* 3139 * For a data-integrity writeout, we need to wait upon any in-progress I/O 3140 * and then start new I/O and then wait upon it. The caller must have a ref on 3141 * the buffer_head. 3142 */ 3143 int __sync_dirty_buffer(struct buffer_head *bh, int rw) 3144 { 3145 int ret = 0; 3146 3147 WARN_ON(atomic_read(&bh->b_count) < 1); 3148 lock_buffer(bh); 3149 if (test_clear_buffer_dirty(bh)) { 3150 get_bh(bh); 3151 bh->b_end_io = end_buffer_write_sync; 3152 ret = submit_bh(rw, bh); 3153 wait_on_buffer(bh); 3154 if (!ret && !buffer_uptodate(bh)) 3155 ret = -EIO; 3156 } else { 3157 unlock_buffer(bh); 3158 } 3159 return ret; 3160 } 3161 EXPORT_SYMBOL(__sync_dirty_buffer); 3162 3163 int sync_dirty_buffer(struct buffer_head *bh) 3164 { 3165 return __sync_dirty_buffer(bh, WRITE_SYNC); 3166 } 3167 EXPORT_SYMBOL(sync_dirty_buffer); 3168 3169 /* 3170 * try_to_free_buffers() checks if all the buffers on this particular page 3171 * are unused, and releases them if so. 3172 * 3173 * Exclusion against try_to_free_buffers may be obtained by either 3174 * locking the page or by holding its mapping's private_lock. 3175 * 3176 * If the page is dirty but all the buffers are clean then we need to 3177 * be sure to mark the page clean as well. This is because the page 3178 * may be against a block device, and a later reattachment of buffers 3179 * to a dirty page will set *all* buffers dirty. Which would corrupt 3180 * filesystem data on the same device. 3181 * 3182 * The same applies to regular filesystem pages: if all the buffers are 3183 * clean then we set the page clean and proceed. To do that, we require 3184 * total exclusion from __set_page_dirty_buffers(). That is obtained with 3185 * private_lock. 3186 * 3187 * try_to_free_buffers() is non-blocking. 
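 *
 * Here a buffer counts as busy if it has an elevated b_count or is
 * dirty or locked; see buffer_busy() below.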
3188 */ 3189 static inline int buffer_busy(struct buffer_head *bh) 3190 { 3191 return atomic_read(&bh->b_count) | 3192 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 3193 } 3194 3195 static int 3196 drop_buffers(struct page *page, struct buffer_head **buffers_to_free) 3197 { 3198 struct buffer_head *head = page_buffers(page); 3199 struct buffer_head *bh; 3200 3201 bh = head; 3202 do { 3203 if (buffer_write_io_error(bh) && page->mapping) 3204 set_bit(AS_EIO, &page->mapping->flags); 3205 if (buffer_busy(bh)) 3206 goto failed; 3207 bh = bh->b_this_page; 3208 } while (bh != head); 3209 3210 do { 3211 struct buffer_head *next = bh->b_this_page; 3212 3213 if (bh->b_assoc_map) 3214 __remove_assoc_queue(bh); 3215 bh = next; 3216 } while (bh != head); 3217 *buffers_to_free = head; 3218 __clear_page_buffers(page); 3219 return 1; 3220 failed: 3221 return 0; 3222 } 3223 3224 int try_to_free_buffers(struct page *page) 3225 { 3226 struct address_space * const mapping = page->mapping; 3227 struct buffer_head *buffers_to_free = NULL; 3228 int ret = 0; 3229 3230 BUG_ON(!PageLocked(page)); 3231 if (PageWriteback(page)) 3232 return 0; 3233 3234 if (mapping == NULL) { /* can this still happen? */ 3235 ret = drop_buffers(page, &buffers_to_free); 3236 goto out; 3237 } 3238 3239 spin_lock(&mapping->private_lock); 3240 ret = drop_buffers(page, &buffers_to_free); 3241 3242 /* 3243 * If the filesystem writes its buffers by hand (eg ext3) 3244 * then we can have clean buffers against a dirty page. We 3245 * clean the page here; otherwise the VM will never notice 3246 * that the filesystem did any IO at all. 3247 * 3248 * Also, during truncate, discard_buffer will have marked all 3249 * the page's buffers clean. We discover that here and clean 3250 * the page also. 3251 * 3252 * private_lock must be held over this entire operation in order 3253 * to synchronise against __set_page_dirty_buffers and prevent the 3254 * dirty bit from being lost. 3255 */ 3256 if (ret) 3257 cancel_dirty_page(page, PAGE_CACHE_SIZE); 3258 spin_unlock(&mapping->private_lock); 3259 out: 3260 if (buffers_to_free) { 3261 struct buffer_head *bh = buffers_to_free; 3262 3263 do { 3264 struct buffer_head *next = bh->b_this_page; 3265 free_buffer_head(bh); 3266 bh = next; 3267 } while (bh != buffers_to_free); 3268 } 3269 return ret; 3270 } 3271 EXPORT_SYMBOL(try_to_free_buffers); 3272 3273 /* 3274 * There are no bdflush tunables left. But distributions are 3275 * still running obsolete flush daemons, so we terminate them here. 3276 * 3277 * Use of bdflush() is deprecated and will be removed in a future kernel. 3278 * The `flush-X' kernel threads fully replace bdflush daemons and this call. 3279 */ 3280 SYSCALL_DEFINE2(bdflush, int, func, long, data) 3281 { 3282 static int msg_count; 3283 3284 if (!capable(CAP_SYS_ADMIN)) 3285 return -EPERM; 3286 3287 if (msg_count < 5) { 3288 msg_count++; 3289 printk(KERN_INFO 3290 "warning: process `%s' used the obsolete bdflush" 3291 " system call\n", current->comm); 3292 printk(KERN_INFO "Fix your initscripts?\n"); 3293 } 3294 3295 if (func == 1) 3296 do_exit(0); 3297 return 0; 3298 } 3299 3300 /* 3301 * Buffer-head allocation 3302 */ 3303 static struct kmem_cache *bh_cachep __read_mostly; 3304 3305 /* 3306 * Once the number of bh's in the machine exceeds this level, we start 3307 * stripping them in writeback. 
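 * The limit itself is sized in buffer_init() below, to roughly 10%
 * of ZONE_NORMAL.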
3308 */ 3309 static unsigned long max_buffer_heads; 3310 3311 int buffer_heads_over_limit; 3312 3313 struct bh_accounting { 3314 int nr; /* Number of live bh's */ 3315 int ratelimit; /* Limit cacheline bouncing */ 3316 }; 3317 3318 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3319 3320 static void recalc_bh_state(void) 3321 { 3322 int i; 3323 int tot = 0; 3324 3325 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 3326 return; 3327 __this_cpu_write(bh_accounting.ratelimit, 0); 3328 for_each_online_cpu(i) 3329 tot += per_cpu(bh_accounting, i).nr; 3330 buffer_heads_over_limit = (tot > max_buffer_heads); 3331 } 3332 3333 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3334 { 3335 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 3336 if (ret) { 3337 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3338 preempt_disable(); 3339 __this_cpu_inc(bh_accounting.nr); 3340 recalc_bh_state(); 3341 preempt_enable(); 3342 } 3343 return ret; 3344 } 3345 EXPORT_SYMBOL(alloc_buffer_head); 3346 3347 void free_buffer_head(struct buffer_head *bh) 3348 { 3349 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3350 kmem_cache_free(bh_cachep, bh); 3351 preempt_disable(); 3352 __this_cpu_dec(bh_accounting.nr); 3353 recalc_bh_state(); 3354 preempt_enable(); 3355 } 3356 EXPORT_SYMBOL(free_buffer_head); 3357 3358 static void buffer_exit_cpu(int cpu) 3359 { 3360 int i; 3361 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3362 3363 for (i = 0; i < BH_LRU_SIZE; i++) { 3364 brelse(b->bhs[i]); 3365 b->bhs[i] = NULL; 3366 } 3367 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 3368 per_cpu(bh_accounting, cpu).nr = 0; 3369 } 3370 3371 static int buffer_cpu_notify(struct notifier_block *self, 3372 unsigned long action, void *hcpu) 3373 { 3374 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) 3375 buffer_exit_cpu((unsigned long)hcpu); 3376 return NOTIFY_OK; 3377 } 3378 3379 /** 3380 * bh_uptodate_or_lock - Test whether the buffer is uptodate 3381 * @bh: struct buffer_head 3382 * 3383 * Return true if the buffer is up-to-date and false, 3384 * with the buffer locked, if not. 3385 */ 3386 int bh_uptodate_or_lock(struct buffer_head *bh) 3387 { 3388 if (!buffer_uptodate(bh)) { 3389 lock_buffer(bh); 3390 if (!buffer_uptodate(bh)) 3391 return 0; 3392 unlock_buffer(bh); 3393 } 3394 return 1; 3395 } 3396 EXPORT_SYMBOL(bh_uptodate_or_lock); 3397 3398 /** 3399 * bh_submit_read - Submit a locked buffer for reading 3400 * @bh: struct buffer_head 3401 * 3402 * Returns zero on success and -EIO on error. 3403 */ 3404 int bh_submit_read(struct buffer_head *bh) 3405 { 3406 BUG_ON(!buffer_locked(bh)); 3407 3408 if (buffer_uptodate(bh)) { 3409 unlock_buffer(bh); 3410 return 0; 3411 } 3412 3413 get_bh(bh); 3414 bh->b_end_io = end_buffer_read_sync; 3415 submit_bh(READ, bh); 3416 wait_on_buffer(bh); 3417 if (buffer_uptodate(bh)) 3418 return 0; 3419 return -EIO; 3420 } 3421 EXPORT_SYMBOL(bh_submit_read); 3422 3423 void __init buffer_init(void) 3424 { 3425 unsigned long nrpages; 3426 3427 bh_cachep = kmem_cache_create("buffer_head", 3428 sizeof(struct buffer_head), 0, 3429 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 3430 SLAB_MEM_SPREAD), 3431 NULL); 3432 3433 /* 3434 * Limit the bh occupancy to 10% of ZONE_NORMAL 3435 */ 3436 nrpages = (nr_free_buffer_pages() * 10) / 100; 3437 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 3438 hotcpu_notifier(buffer_cpu_notify, 0); 3439 } 3440