/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
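
/*
 * Illustrative sketch: a volume-snapshot implementation might bracket its
 * snapshot creation with freeze_bdev()/thaw_bdev() roughly as below.
 * example_snapshot() and take_snapshot() are hypothetical placeholders,
 * not part of this file's API.
 */
#if 0
static int example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;
	int err;

	sb = freeze_bdev(bdev);		/* flush and block new writes */
	err = take_snapshot(bdev);	/* hypothetical snapshot step */
	thaw_bdev(bdev, sb);		/* let writes proceed again */
	return err;
}
#endif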
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media-disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted; thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with the data belonging
   to the old, now corrupted, disk). Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies the device driver
   to issue a sync on the device (without waiting I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer.
   NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_online_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, 0, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
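
/*
 * Illustrative sketch: a filesystem that keeps its indirect blocks in the
 * blockdev mapping dirties them against the owning inode (typically in its
 * write path) and then writes them out from fsync() via
 * sync_mapping_buffers().  example_fsync() is a hypothetical name.
 */
#if 0
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/*
	 * Earlier, after modifying an indirect block `bh':
	 *	mark_buffer_dirty_inode(bh, inode);
	 */
	return sync_mapping_buffers(inode->i_mapping);
}
#endif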
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		list_del_init(&bh->b_assoc_buffers);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.
 * Which assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__FUNCTION__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers..
	 */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
	WARN_ON(1);
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (!list_empty(&bh->b_assoc_buffers)) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
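
/*
 * Illustrative sketch: reading a metadata block.  __getblk() cannot fail,
 * whereas __bread() returns NULL if the block could not be read; every
 * successful lookup is balanced with brelse().  example_read_block() and
 * its arguments are hypothetical.
 */
#if 0
static int example_read_block(struct block_device *bdev, sector_t block,
				unsigned size)
{
	struct buffer_head *bh;

	bh = __bread(bdev, block, size);
	if (!bh)
		return -EIO;	/* I/O error or unreadable block */

	/* ... interpret bh->b_data here ... */

	brelse(bh);		/* drop the reference taken by __bread() */
	return 0;
}
#endif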
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This can only happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
			lock_buffer(bh);
		} else if (test_set_buffer_locked(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		end_page_writeback(page);

		/*
		 * The page and buffer_heads can be released at any time from
		 * here on.
		 */
		wbc->pages_skipped++;		/* We didn't write this page */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write(bh);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
	goto done;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user_page(page, start, size, KM_USER0);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);

static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from) {
					void *kaddr;

					kaddr = kmap_atomic(page, KM_USER0);
					if (block_end > to)
						memset(kaddr+to, 0,
							block_end-to);
					if (block_start < from)
						memset(kaddr+block_start,
							0, from-block_start);
					flush_dcache_page(page);
					kunmap_atomic(kaddr, KM_USER0);
				}
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	return err;
}

static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}

/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * If *pagep is not NULL, then block_write_begin uses the locked page
 * at *pagep rather than allocating its own. In this case, the page will
 * not be unlocked or deallocated on failure.
1946 */ 1947 int block_write_begin(struct file *file, struct address_space *mapping, 1948 loff_t pos, unsigned len, unsigned flags, 1949 struct page **pagep, void **fsdata, 1950 get_block_t *get_block) 1951 { 1952 struct inode *inode = mapping->host; 1953 int status = 0; 1954 struct page *page; 1955 pgoff_t index; 1956 unsigned start, end; 1957 int ownpage = 0; 1958 1959 index = pos >> PAGE_CACHE_SHIFT; 1960 start = pos & (PAGE_CACHE_SIZE - 1); 1961 end = start + len; 1962 1963 page = *pagep; 1964 if (page == NULL) { 1965 ownpage = 1; 1966 page = __grab_cache_page(mapping, index); 1967 if (!page) { 1968 status = -ENOMEM; 1969 goto out; 1970 } 1971 *pagep = page; 1972 } else 1973 BUG_ON(!PageLocked(page)); 1974 1975 status = __block_prepare_write(inode, page, start, end, get_block); 1976 if (unlikely(status)) { 1977 ClearPageUptodate(page); 1978 1979 if (ownpage) { 1980 unlock_page(page); 1981 page_cache_release(page); 1982 *pagep = NULL; 1983 1984 /* 1985 * prepare_write() may have instantiated a few blocks 1986 * outside i_size. Trim these off again. Don't need 1987 * i_size_read because we hold i_mutex. 1988 */ 1989 if (pos + len > inode->i_size) 1990 vmtruncate(inode, inode->i_size); 1991 } 1992 goto out; 1993 } 1994 1995 out: 1996 return status; 1997 } 1998 EXPORT_SYMBOL(block_write_begin); 1999 2000 int block_write_end(struct file *file, struct address_space *mapping, 2001 loff_t pos, unsigned len, unsigned copied, 2002 struct page *page, void *fsdata) 2003 { 2004 struct inode *inode = mapping->host; 2005 unsigned start; 2006 2007 start = pos & (PAGE_CACHE_SIZE - 1); 2008 2009 if (unlikely(copied < len)) { 2010 /* 2011 * The buffers that were written will now be uptodate, so we 2012 * don't have to worry about a readpage reading them and 2013 * overwriting a partial write. However if we have encountered 2014 * a short write and only partially written into a buffer, it 2015 * will not be marked uptodate, so a readpage might come in and 2016 * destroy our partial write. 2017 * 2018 * Do the simplest thing, and just treat any short write to a 2019 * non uptodate page as a zero-length write, and force the 2020 * caller to redo the whole thing. 2021 */ 2022 if (!PageUptodate(page)) 2023 copied = 0; 2024 2025 page_zero_new_buffers(page, start+copied, start+len); 2026 } 2027 flush_dcache_page(page); 2028 2029 /* This could be a short (even 0-length) commit */ 2030 __block_commit_write(inode, page, start, start+copied); 2031 2032 return copied; 2033 } 2034 EXPORT_SYMBOL(block_write_end); 2035 2036 int generic_write_end(struct file *file, struct address_space *mapping, 2037 loff_t pos, unsigned len, unsigned copied, 2038 struct page *page, void *fsdata) 2039 { 2040 struct inode *inode = mapping->host; 2041 2042 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2043 2044 /* 2045 * No need to use i_size_read() here, the i_size 2046 * cannot change under us because we hold i_mutex. 2047 * 2048 * But it's important to update i_size while still holding page lock: 2049 * page writeout could otherwise come in and zero beyond i_size. 2050 */ 2051 if (pos+copied > inode->i_size) { 2052 i_size_write(inode, pos+copied); 2053 mark_inode_dirty(inode); 2054 } 2055 2056 unlock_page(page); 2057 page_cache_release(page); 2058 2059 return copied; 2060 } 2061 EXPORT_SYMBOL(generic_write_end); 2062 2063 /* 2064 * Generic "read page" function for block devices that have the normal 2065 * get_block functionality. This is most of the block device filesystems. 
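 * An illustrative (hypothetical) ->readpage built on top of it:
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 *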
2066 * Reads the page asynchronously --- the unlock_buffer() and 2067 * set/clear_buffer_uptodate() functions propagate buffer state into the 2068 * page struct once IO has completed. 2069 */ 2070 int block_read_full_page(struct page *page, get_block_t *get_block) 2071 { 2072 struct inode *inode = page->mapping->host; 2073 sector_t iblock, lblock; 2074 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 2075 unsigned int blocksize; 2076 int nr, i; 2077 int fully_mapped = 1; 2078 2079 BUG_ON(!PageLocked(page)); 2080 blocksize = 1 << inode->i_blkbits; 2081 if (!page_has_buffers(page)) 2082 create_empty_buffers(page, blocksize, 0); 2083 head = page_buffers(page); 2084 2085 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2086 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits; 2087 bh = head; 2088 nr = 0; 2089 i = 0; 2090 2091 do { 2092 if (buffer_uptodate(bh)) 2093 continue; 2094 2095 if (!buffer_mapped(bh)) { 2096 int err = 0; 2097 2098 fully_mapped = 0; 2099 if (iblock < lblock) { 2100 WARN_ON(bh->b_size != blocksize); 2101 err = get_block(inode, iblock, bh, 0); 2102 if (err) 2103 SetPageError(page); 2104 } 2105 if (!buffer_mapped(bh)) { 2106 zero_user_page(page, i * blocksize, blocksize, 2107 KM_USER0); 2108 if (!err) 2109 set_buffer_uptodate(bh); 2110 continue; 2111 } 2112 /* 2113 * get_block() might have updated the buffer 2114 * synchronously 2115 */ 2116 if (buffer_uptodate(bh)) 2117 continue; 2118 } 2119 arr[nr++] = bh; 2120 } while (i++, iblock++, (bh = bh->b_this_page) != head); 2121 2122 if (fully_mapped) 2123 SetPageMappedToDisk(page); 2124 2125 if (!nr) { 2126 /* 2127 * All buffers are uptodate - we can set the page uptodate 2128 * as well. But not if get_block() returned an error. 2129 */ 2130 if (!PageError(page)) 2131 SetPageUptodate(page); 2132 unlock_page(page); 2133 return 0; 2134 } 2135 2136 /* Stage two: lock the buffers */ 2137 for (i = 0; i < nr; i++) { 2138 bh = arr[i]; 2139 lock_buffer(bh); 2140 mark_buffer_async_read(bh); 2141 } 2142 2143 /* 2144 * Stage 3: start the IO. Check for uptodateness 2145 * inside the buffer lock in case another process reading 2146 * the underlying blockdev brought it uptodate (the sct fix). 2147 */ 2148 for (i = 0; i < nr; i++) { 2149 bh = arr[i]; 2150 if (buffer_uptodate(bh)) 2151 end_buffer_async_read(bh, 1); 2152 else 2153 submit_bh(READ, bh); 2154 } 2155 return 0; 2156 } 2157 2158 /* utility function for filesystems that need to do work on expanding 2159 * truncates. Uses prepare/commit_write to allow the filesystem to 2160 * deal with the hole. 2161 */ 2162 static int __generic_cont_expand(struct inode *inode, loff_t size, 2163 pgoff_t index, unsigned int offset) 2164 { 2165 struct address_space *mapping = inode->i_mapping; 2166 struct page *page; 2167 unsigned long limit; 2168 int err; 2169 2170 err = -EFBIG; 2171 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 2172 if (limit != RLIM_INFINITY && size > (loff_t)limit) { 2173 send_sig(SIGXFSZ, current, 0); 2174 goto out; 2175 } 2176 if (size > inode->i_sb->s_maxbytes) 2177 goto out; 2178 2179 err = -ENOMEM; 2180 page = grab_cache_page(mapping, index); 2181 if (!page) 2182 goto out; 2183 err = mapping->a_ops->prepare_write(NULL, page, offset, offset); 2184 if (err) { 2185 /* 2186 * ->prepare_write() may have instantiated a few blocks 2187 * outside i_size. Trim these off again. 
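 * vmtruncate() back to the (unchanged) i_size below takes care of that.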
2188 */ 2189 unlock_page(page); 2190 page_cache_release(page); 2191 vmtruncate(inode, inode->i_size); 2192 goto out; 2193 } 2194 2195 err = mapping->a_ops->commit_write(NULL, page, offset, offset); 2196 2197 unlock_page(page); 2198 page_cache_release(page); 2199 if (err > 0) 2200 err = 0; 2201 out: 2202 return err; 2203 } 2204 2205 int generic_cont_expand(struct inode *inode, loff_t size) 2206 { 2207 pgoff_t index; 2208 unsigned int offset; 2209 2210 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */ 2211 2212 /* ugh. in prepare/commit_write, if from==to==start of block, we 2213 ** skip the prepare. make sure we never send an offset for the start 2214 ** of a block 2215 */ 2216 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) { 2217 /* caller must handle this extra byte. */ 2218 offset++; 2219 } 2220 index = size >> PAGE_CACHE_SHIFT; 2221 2222 return __generic_cont_expand(inode, size, index, offset); 2223 } 2224 2225 int generic_cont_expand_simple(struct inode *inode, loff_t size) 2226 { 2227 loff_t pos = size - 1; 2228 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 2229 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1; 2230 2231 /* prepare/commit_write can handle even if from==to==start of block. */ 2232 return __generic_cont_expand(inode, size, index, offset); 2233 } 2234 2235 /* 2236 * For moronic filesystems that do not allow holes in files. 2237 * We may have to extend the file. 2238 */ 2239 2240 int cont_prepare_write(struct page *page, unsigned offset, 2241 unsigned to, get_block_t *get_block, loff_t *bytes) 2242 { 2243 struct address_space *mapping = page->mapping; 2244 struct inode *inode = mapping->host; 2245 struct page *new_page; 2246 pgoff_t pgpos; 2247 long status; 2248 unsigned zerofrom; 2249 unsigned blocksize = 1 << inode->i_blkbits; 2250 2251 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) { 2252 status = -ENOMEM; 2253 new_page = grab_cache_page(mapping, pgpos); 2254 if (!new_page) 2255 goto out; 2256 /* we might sleep */ 2257 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) { 2258 unlock_page(new_page); 2259 page_cache_release(new_page); 2260 continue; 2261 } 2262 zerofrom = *bytes & ~PAGE_CACHE_MASK; 2263 if (zerofrom & (blocksize-1)) { 2264 *bytes |= (blocksize-1); 2265 (*bytes)++; 2266 } 2267 status = __block_prepare_write(inode, new_page, zerofrom, 2268 PAGE_CACHE_SIZE, get_block); 2269 if (status) 2270 goto out_unmap; 2271 zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom, 2272 KM_USER0); 2273 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE); 2274 unlock_page(new_page); 2275 page_cache_release(new_page); 2276 } 2277 2278 if (page->index < pgpos) { 2279 /* completely inside the area */ 2280 zerofrom = offset; 2281 } else { 2282 /* page covers the boundary, find the boundary offset */ 2283 zerofrom = *bytes & ~PAGE_CACHE_MASK; 2284 2285 /* if we expand the file, the last block will be filled */ 2286 if (to > zerofrom && (zerofrom & (blocksize-1))) { 2287 *bytes |= (blocksize-1); 2288 (*bytes)++; 2289 } 2290 2291 /* starting below the boundary?
Nothing to zero out */ 2292 if (offset <= zerofrom) 2293 zerofrom = offset; 2294 } 2295 status = __block_prepare_write(inode, page, zerofrom, to, get_block); 2296 if (status) 2297 goto out1; 2298 if (zerofrom < offset) { 2299 zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0); 2300 __block_commit_write(inode, page, zerofrom, offset); 2301 } 2302 return 0; 2303 out1: 2304 ClearPageUptodate(page); 2305 return status; 2306 2307 out_unmap: 2308 ClearPageUptodate(new_page); 2309 unlock_page(new_page); 2310 page_cache_release(new_page); 2311 out: 2312 return status; 2313 } 2314 2315 int block_prepare_write(struct page *page, unsigned from, unsigned to, 2316 get_block_t *get_block) 2317 { 2318 struct inode *inode = page->mapping->host; 2319 int err = __block_prepare_write(inode, page, from, to, get_block); 2320 if (err) 2321 ClearPageUptodate(page); 2322 return err; 2323 } 2324 2325 int block_commit_write(struct page *page, unsigned from, unsigned to) 2326 { 2327 struct inode *inode = page->mapping->host; 2328 __block_commit_write(inode,page,from,to); 2329 return 0; 2330 } 2331 2332 int generic_commit_write(struct file *file, struct page *page, 2333 unsigned from, unsigned to) 2334 { 2335 struct inode *inode = page->mapping->host; 2336 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 2337 __block_commit_write(inode,page,from,to); 2338 /* 2339 * No need to use i_size_read() here, the i_size 2340 * cannot change under us because we hold i_mutex. 2341 */ 2342 if (pos > inode->i_size) { 2343 i_size_write(inode, pos); 2344 mark_inode_dirty(inode); 2345 } 2346 return 0; 2347 } 2348 2349 /* 2350 * block_page_mkwrite() is not allowed to change the file size as it gets 2351 * called from a page fault handler when a page is first dirtied. Hence we must 2352 * be careful to check for EOF conditions here. We set the page up correctly 2353 * for a written page which means we get ENOSPC checking when writing into 2354 * holes and correct delalloc and unwritten extent mapping on filesystems that 2355 * support these features. 2356 * 2357 * We are not allowed to take the i_mutex here so we have to play games to 2358 * protect against truncate races as the page could now be beyond EOF. Because 2359 * vmtruncate() writes the inode size before removing pages, once we have the 2360 * page lock we can determine safely if the page is beyond EOF. If it is not 2361 * beyond EOF, then the page is guaranteed safe against truncation until we 2362 * unlock the page. 2363 */ 2364 int 2365 block_page_mkwrite(struct vm_area_struct *vma, struct page *page, 2366 get_block_t get_block) 2367 { 2368 struct inode *inode = vma->vm_file->f_path.dentry->d_inode; 2369 unsigned long end; 2370 loff_t size; 2371 int ret = -EINVAL; 2372 2373 lock_page(page); 2374 size = i_size_read(inode); 2375 if ((page->mapping != inode->i_mapping) || 2376 (page_offset(page) > size)) { 2377 /* page got truncated out from underneath us */ 2378 goto out_unlock; 2379 } 2380 2381 /* page is wholly or partially inside EOF */ 2382 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) 2383 end = size & ~PAGE_CACHE_MASK; 2384 else 2385 end = PAGE_CACHE_SIZE; 2386 2387 ret = block_prepare_write(page, 0, end, get_block); 2388 if (!ret) 2389 ret = block_commit_write(page, 0, end); 2390 2391 out_unlock: 2392 unlock_page(page); 2393 return ret; 2394 } 2395 2396 /* 2397 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed 2398 * immediately, while under the page lock. 
So it needs a special end_io 2399 * handler which does not touch the bh after unlocking it. 2400 */ 2401 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) 2402 { 2403 __end_buffer_read_notouch(bh, uptodate); 2404 } 2405 2406 /* 2407 * On entry, the page is fully not uptodate. 2408 * On exit the page is fully uptodate in the areas outside (from,to) 2409 */ 2410 int nobh_prepare_write(struct page *page, unsigned from, unsigned to, 2411 get_block_t *get_block) 2412 { 2413 struct inode *inode = page->mapping->host; 2414 const unsigned blkbits = inode->i_blkbits; 2415 const unsigned blocksize = 1 << blkbits; 2416 struct buffer_head *head, *bh; 2417 unsigned block_in_page; 2418 unsigned block_start, block_end; 2419 sector_t block_in_file; 2420 char *kaddr; 2421 int nr_reads = 0; 2422 int ret = 0; 2423 int is_mapped_to_disk = 1; 2424 2425 if (page_has_buffers(page)) 2426 return block_prepare_write(page, from, to, get_block); 2427 2428 if (PageMappedToDisk(page)) 2429 return 0; 2430 2431 /* 2432 * Allocate buffers so that we can keep track of state, and potentially 2433 * attach them to the page if an error occurs. In the common case of 2434 * no error, they will just be freed again without ever being attached 2435 * to the page (which is all OK, because we're under the page lock). 2436 * 2437 * Be careful: the buffer linked list is a NULL terminated one, rather 2438 * than the circular one we're used to. 2439 */ 2440 head = alloc_page_buffers(page, blocksize, 0); 2441 if (!head) 2442 return -ENOMEM; 2443 2444 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 2445 2446 /* 2447 * We loop across all blocks in the page, whether or not they are 2448 * part of the affected region. This is so we can discover if the 2449 * page is fully mapped-to-disk. 2450 */ 2451 for (block_start = 0, block_in_page = 0, bh = head; 2452 block_start < PAGE_CACHE_SIZE; 2453 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { 2454 int create; 2455 2456 block_end = block_start + blocksize; 2457 bh->b_state = 0; 2458 create = 1; 2459 if (block_start >= to) 2460 create = 0; 2461 ret = get_block(inode, block_in_file + block_in_page, 2462 bh, create); 2463 if (ret) 2464 goto failed; 2465 if (!buffer_mapped(bh)) 2466 is_mapped_to_disk = 0; 2467 if (buffer_new(bh)) 2468 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); 2469 if (PageUptodate(page)) { 2470 set_buffer_uptodate(bh); 2471 continue; 2472 } 2473 if (buffer_new(bh) || !buffer_mapped(bh)) { 2474 kaddr = kmap_atomic(page, KM_USER0); 2475 if (block_start < from) 2476 memset(kaddr+block_start, 0, from-block_start); 2477 if (block_end > to) 2478 memset(kaddr + to, 0, block_end - to); 2479 flush_dcache_page(page); 2480 kunmap_atomic(kaddr, KM_USER0); 2481 continue; 2482 } 2483 if (buffer_uptodate(bh)) 2484 continue; /* reiserfs does this */ 2485 if (block_start < from || block_end > to) { 2486 lock_buffer(bh); 2487 bh->b_end_io = end_buffer_read_nobh; 2488 submit_bh(READ, bh); 2489 nr_reads++; 2490 } 2491 } 2492 2493 if (nr_reads) { 2494 /* 2495 * The page is locked, so these buffers are protected from 2496 * any VM or truncate activity. Hence we don't need to care 2497 * for the buffer_head refcounts. 
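 * Note that the list is still NULL-terminated (it was never attached
 * to the page), which is why the loop below walks it with a plain
 * for() rather than the usual circular do/while.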
2498 */ 2499 for (bh = head; bh; bh = bh->b_this_page) { 2500 wait_on_buffer(bh); 2501 if (!buffer_uptodate(bh)) 2502 ret = -EIO; 2503 } 2504 if (ret) 2505 goto failed; 2506 } 2507 2508 if (is_mapped_to_disk) 2509 SetPageMappedToDisk(page); 2510 2511 do { 2512 bh = head; 2513 head = head->b_this_page; 2514 free_buffer_head(bh); 2515 } while (head); 2516 2517 return 0; 2518 2519 failed: 2520 /* 2521 * Error recovery is a bit difficult. We need to zero out blocks that 2522 * were newly allocated, and dirty them to ensure they get written out. 2523 * Buffers need to be attached to the page at this point, otherwise 2524 * the handling of potential IO errors during writeout would be hard 2525 * (could try doing synchronous writeout, but what if that fails too?) 2526 */ 2527 spin_lock(&page->mapping->private_lock); 2528 bh = head; 2529 block_start = 0; 2530 do { 2531 if (PageUptodate(page)) 2532 set_buffer_uptodate(bh); 2533 if (PageDirty(page)) 2534 set_buffer_dirty(bh); 2535 2536 block_end = block_start+blocksize; 2537 if (block_end <= from) 2538 goto next; 2539 if (block_start >= to) 2540 goto next; 2541 2542 if (buffer_new(bh)) { 2543 clear_buffer_new(bh); 2544 if (!buffer_uptodate(bh)) { 2545 zero_user_page(page, block_start, bh->b_size, KM_USER0); 2546 set_buffer_uptodate(bh); 2547 } 2548 mark_buffer_dirty(bh); 2549 } 2550 next: 2551 block_start = block_end; 2552 if (!bh->b_this_page) 2553 bh->b_this_page = head; 2554 bh = bh->b_this_page; 2555 } while (bh != head); 2556 attach_page_buffers(page, head); 2557 spin_unlock(&page->mapping->private_lock); 2558 2559 return ret; 2560 } 2561 EXPORT_SYMBOL(nobh_prepare_write); 2562 2563 /* 2564 * Make sure any changes to nobh_commit_write() are reflected in 2565 * nobh_truncate_page(), since it doesn't call commit_write(). 2566 */ 2567 int nobh_commit_write(struct file *file, struct page *page, 2568 unsigned from, unsigned to) 2569 { 2570 struct inode *inode = page->mapping->host; 2571 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 2572 2573 if (page_has_buffers(page)) 2574 return generic_commit_write(file, page, from, to); 2575 2576 SetPageUptodate(page); 2577 set_page_dirty(page); 2578 if (pos > inode->i_size) { 2579 i_size_write(inode, pos); 2580 mark_inode_dirty(inode); 2581 } 2582 return 0; 2583 } 2584 EXPORT_SYMBOL(nobh_commit_write); 2585 2586 /* 2587 * nobh_writepage() - based on block_write_full_page() except 2588 * that it tries to operate without attaching bufferheads to 2589 * the page. 2590 */ 2591 int nobh_writepage(struct page *page, get_block_t *get_block, 2592 struct writeback_control *wbc) 2593 { 2594 struct inode * const inode = page->mapping->host; 2595 loff_t i_size = i_size_read(inode); 2596 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2597 unsigned offset; 2598 int ret; 2599 2600 /* Is the page fully inside i_size? */ 2601 if (page->index < end_index) 2602 goto out; 2603 2604 /* Is the page fully outside i_size? (truncate in progress) */ 2605 offset = i_size & (PAGE_CACHE_SIZE-1); 2606 if (page->index >= end_index+1 || !offset) { 2607 /* 2608 * The page may have dirty, unmapped buffers. For example, 2609 * they may have been added in ext3_writepage(). Make them 2610 * freeable here, so the page does not leak. 2611 */ 2612 #if 0 2613 /* Not really sure about this - do we need this?
*/ 2614 if (page->mapping->a_ops->invalidatepage) 2615 page->mapping->a_ops->invalidatepage(page, offset); 2616 #endif 2617 unlock_page(page); 2618 return 0; /* don't care */ 2619 } 2620 2621 /* 2622 * The page straddles i_size. It must be zeroed out on each and every 2623 * writepage invocation because it may be mmapped. "A file is mapped 2624 * in multiples of the page size. For a file that is not a multiple of 2625 * the page size, the remaining memory is zeroed when mapped, and 2626 * writes to that region are not written out to the file." 2627 */ 2628 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 2629 out: 2630 ret = mpage_writepage(page, get_block, wbc); 2631 if (ret == -EAGAIN) 2632 ret = __block_write_full_page(inode, page, get_block, wbc); 2633 return ret; 2634 } 2635 EXPORT_SYMBOL(nobh_writepage); 2636 2637 /* 2638 * This function assumes that ->prepare_write() uses nobh_prepare_write(). 2639 */ 2640 int nobh_truncate_page(struct address_space *mapping, loff_t from) 2641 { 2642 struct inode *inode = mapping->host; 2643 unsigned blocksize = 1 << inode->i_blkbits; 2644 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2645 unsigned offset = from & (PAGE_CACHE_SIZE-1); 2646 unsigned to; 2647 struct page *page; 2648 const struct address_space_operations *a_ops = mapping->a_ops; 2649 int ret = 0; 2650 2651 if ((offset & (blocksize - 1)) == 0) 2652 goto out; 2653 2654 ret = -ENOMEM; 2655 page = grab_cache_page(mapping, index); 2656 if (!page) 2657 goto out; 2658 2659 to = (offset + blocksize) & ~(blocksize - 1); 2660 ret = a_ops->prepare_write(NULL, page, offset, to); 2661 if (ret == 0) { 2662 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, 2663 KM_USER0); 2664 /* 2665 * It would be more correct to call aops->commit_write() 2666 * here, but this is more efficient. 2667 */ 2668 SetPageUptodate(page); 2669 set_page_dirty(page); 2670 } 2671 unlock_page(page); 2672 page_cache_release(page); 2673 out: 2674 return ret; 2675 } 2676 EXPORT_SYMBOL(nobh_truncate_page); 2677 2678 int block_truncate_page(struct address_space *mapping, 2679 loff_t from, get_block_t *get_block) 2680 { 2681 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2682 unsigned offset = from & (PAGE_CACHE_SIZE-1); 2683 unsigned blocksize; 2684 sector_t iblock; 2685 unsigned length, pos; 2686 struct inode *inode = mapping->host; 2687 struct page *page; 2688 struct buffer_head *bh; 2689 int err; 2690 2691 blocksize = 1 << inode->i_blkbits; 2692 length = offset & (blocksize - 1); 2693 2694 /* Block boundary? Nothing to do */ 2695 if (!length) 2696 return 0; 2697 2698 length = blocksize - length; 2699 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2700 2701 page = grab_cache_page(mapping, index); 2702 err = -ENOMEM; 2703 if (!page) 2704 goto out; 2705 2706 if (!page_has_buffers(page)) 2707 create_empty_buffers(page, blocksize, 0); 2708 2709 /* Find the buffer that contains "offset" */ 2710 bh = page_buffers(page); 2711 pos = blocksize; 2712 while (offset >= pos) { 2713 bh = bh->b_this_page; 2714 iblock++; 2715 pos += blocksize; 2716 } 2717 2718 err = 0; 2719 if (!buffer_mapped(bh)) { 2720 WARN_ON(bh->b_size != blocksize); 2721 err = get_block(inode, iblock, bh, 0); 2722 if (err) 2723 goto unlock; 2724 /* unmapped? It's a hole - nothing to do */ 2725 if (!buffer_mapped(bh)) 2726 goto unlock; 2727 } 2728 2729 /* Ok, it's mapped. 
Make sure it's up-to-date */ 2730 if (PageUptodate(page)) 2731 set_buffer_uptodate(bh); 2732 2733 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2734 err = -EIO; 2735 ll_rw_block(READ, 1, &bh); 2736 wait_on_buffer(bh); 2737 /* Uhhuh. Read error. Complain and punt. */ 2738 if (!buffer_uptodate(bh)) 2739 goto unlock; 2740 } 2741 2742 zero_user_page(page, offset, length, KM_USER0); 2743 mark_buffer_dirty(bh); 2744 err = 0; 2745 2746 unlock: 2747 unlock_page(page); 2748 page_cache_release(page); 2749 out: 2750 return err; 2751 } 2752 2753 /* 2754 * The generic ->writepage function for buffer-backed address_spaces 2755 */ 2756 int block_write_full_page(struct page *page, get_block_t *get_block, 2757 struct writeback_control *wbc) 2758 { 2759 struct inode * const inode = page->mapping->host; 2760 loff_t i_size = i_size_read(inode); 2761 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2762 unsigned offset; 2763 2764 /* Is the page fully inside i_size? */ 2765 if (page->index < end_index) 2766 return __block_write_full_page(inode, page, get_block, wbc); 2767 2768 /* Is the page fully outside i_size? (truncate in progress) */ 2769 offset = i_size & (PAGE_CACHE_SIZE-1); 2770 if (page->index >= end_index+1 || !offset) { 2771 /* 2772 * The page may have dirty, unmapped buffers. For example, 2773 * they may have been added in ext3_writepage(). Make them 2774 * freeable here, so the page does not leak. 2775 */ 2776 do_invalidatepage(page, 0); 2777 unlock_page(page); 2778 return 0; /* don't care */ 2779 } 2780 2781 /* 2782 * The page straddles i_size. It must be zeroed out on each and every 2783 * writepage invocation because it may be mmapped. "A file is mapped 2784 * in multiples of the page size. For a file that is not a multiple of 2785 * the page size, the remaining memory is zeroed when mapped, and 2786 * writes to that region are not written out to the file." 2787 */ 2788 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 2789 return __block_write_full_page(inode, page, get_block, wbc); 2790 } 2791 2792 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2793 get_block_t *get_block) 2794 { 2795 struct buffer_head tmp; 2796 struct inode *inode = mapping->host; 2797 tmp.b_state = 0; 2798 tmp.b_blocknr = 0; 2799 tmp.b_size = 1 << inode->i_blkbits; 2800 get_block(inode, block, &tmp, 0); 2801 return tmp.b_blocknr; 2802 } 2803 2804 static void end_bio_bh_io_sync(struct bio *bio, int err) 2805 { 2806 struct buffer_head *bh = bio->bi_private; 2807 2808 if (err == -EOPNOTSUPP) { 2809 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 2810 set_bit(BH_Eopnotsupp, &bh->b_state); 2811 } 2812 2813 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); 2814 bio_put(bio); 2815 } 2816 2817 int submit_bh(int rw, struct buffer_head * bh) 2818 { 2819 struct bio *bio; 2820 int ret = 0; 2821 2822 BUG_ON(!buffer_locked(bh)); 2823 BUG_ON(!buffer_mapped(bh)); 2824 BUG_ON(!bh->b_end_io); 2825 2826 if (buffer_ordered(bh) && (rw == WRITE)) 2827 rw = WRITE_BARRIER; 2828 2829 /* 2830 * Only clear out a write error when rewriting; should this 2831 * include WRITE_SYNC as well?
2832 */ 2833 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER)) 2834 clear_buffer_write_io_error(bh); 2835 2836 /* 2837 * from here on down, it's all bio -- do the initial mapping, 2838 * submit_bio -> generic_make_request may further map this bio around 2839 */ 2840 bio = bio_alloc(GFP_NOIO, 1); 2841 2842 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 2843 bio->bi_bdev = bh->b_bdev; 2844 bio->bi_io_vec[0].bv_page = bh->b_page; 2845 bio->bi_io_vec[0].bv_len = bh->b_size; 2846 bio->bi_io_vec[0].bv_offset = bh_offset(bh); 2847 2848 bio->bi_vcnt = 1; 2849 bio->bi_idx = 0; 2850 bio->bi_size = bh->b_size; 2851 2852 bio->bi_end_io = end_bio_bh_io_sync; 2853 bio->bi_private = bh; 2854 2855 bio_get(bio); 2856 submit_bio(rw, bio); 2857 2858 if (bio_flagged(bio, BIO_EOPNOTSUPP)) 2859 ret = -EOPNOTSUPP; 2860 2861 bio_put(bio); 2862 return ret; 2863 } 2864 2865 /** 2866 * ll_rw_block: low-level access to block devices (DEPRECATED) 2867 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead) 2868 * @nr: number of &struct buffer_heads in the array 2869 * @bhs: array of pointers to &struct buffer_head 2870 * 2871 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and 2872 * requests an I/O operation on them, either a %READ or a %WRITE. The third 2873 * option, %SWRITE, is like %WRITE except that we make sure the *current* data 2874 * in the buffers is sent to disk. The fourth %READA option is described in 2875 * the documentation for generic_make_request(), which ll_rw_block() calls. 2876 * 2877 * This function drops any buffer that it cannot get a lock on (with the 2878 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be 2879 * clean when doing a write request, and any buffer that appears to be 2880 * up-to-date when doing a read request. Further, it marks as clean buffers 2881 * that are processed for writing (the buffer cache won't assume that they are 2882 * actually clean until the buffer gets unlocked). 2883 * 2884 * ll_rw_block sets b_end_io to a simple completion handler that marks 2885 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes 2886 * any waiters. 2887 * 2888 * All of the buffers must be for the same device, and must also be a 2889 * multiple of the current approved size for the device. 2890 */ 2891 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) 2892 { 2893 int i; 2894 2895 for (i = 0; i < nr; i++) { 2896 struct buffer_head *bh = bhs[i]; 2897 2898 if (rw == SWRITE) 2899 lock_buffer(bh); 2900 else if (test_set_buffer_locked(bh)) 2901 continue; 2902 2903 if (rw == WRITE || rw == SWRITE) { 2904 if (test_clear_buffer_dirty(bh)) { 2905 bh->b_end_io = end_buffer_write_sync; 2906 get_bh(bh); 2907 submit_bh(WRITE, bh); 2908 continue; 2909 } 2910 } else { 2911 if (!buffer_uptodate(bh)) { 2912 bh->b_end_io = end_buffer_read_sync; 2913 get_bh(bh); 2914 submit_bh(rw, bh); 2915 continue; 2916 } 2917 } 2918 unlock_buffer(bh); 2919 } 2920 } 2921 2922 /* 2923 * For a data-integrity writeout, we need to wait upon any in-progress I/O, 2924 * then start new I/O and wait upon that too. The caller must have a ref on 2925 * the buffer_head.
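 *
 * An illustrative usage pattern (not lifted from any particular
 * filesystem; bh is assumed to have been obtained and referenced via,
 * e.g., sb_bread(), and "offset", "src" and "len" are placeholders):
 *
 *	memcpy(bh->b_data + offset, src, len);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		... handle -EIO or -EOPNOTSUPP ...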
2926 */ 2927 int sync_dirty_buffer(struct buffer_head *bh) 2928 { 2929 int ret = 0; 2930 2931 WARN_ON(atomic_read(&bh->b_count) < 1); 2932 lock_buffer(bh); 2933 if (test_clear_buffer_dirty(bh)) { 2934 get_bh(bh); 2935 bh->b_end_io = end_buffer_write_sync; 2936 ret = submit_bh(WRITE, bh); 2937 wait_on_buffer(bh); 2938 if (buffer_eopnotsupp(bh)) { 2939 clear_buffer_eopnotsupp(bh); 2940 ret = -EOPNOTSUPP; 2941 } 2942 if (!ret && !buffer_uptodate(bh)) 2943 ret = -EIO; 2944 } else { 2945 unlock_buffer(bh); 2946 } 2947 return ret; 2948 } 2949 2950 /* 2951 * try_to_free_buffers() checks if all the buffers on this particular page 2952 * are unused, and releases them if so. 2953 * 2954 * Exclusion against try_to_free_buffers may be obtained by either 2955 * locking the page or by holding its mapping's private_lock. 2956 * 2957 * If the page is dirty but all the buffers are clean then we need to 2958 * be sure to mark the page clean as well. This is because the page 2959 * may be against a block device, and a later reattachment of buffers 2960 * to a dirty page will set *all* buffers dirty. Which would corrupt 2961 * filesystem data on the same device. 2962 * 2963 * The same applies to regular filesystem pages: if all the buffers are 2964 * clean then we set the page clean and proceed. To do that, we require 2965 * total exclusion from __set_page_dirty_buffers(). That is obtained with 2966 * private_lock. 2967 * 2968 * try_to_free_buffers() is non-blocking. 2969 */ 2970 static inline int buffer_busy(struct buffer_head *bh) 2971 { 2972 return atomic_read(&bh->b_count) | 2973 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 2974 } 2975 2976 static int 2977 drop_buffers(struct page *page, struct buffer_head **buffers_to_free) 2978 { 2979 struct buffer_head *head = page_buffers(page); 2980 struct buffer_head *bh; 2981 2982 bh = head; 2983 do { 2984 if (buffer_write_io_error(bh) && page->mapping) 2985 set_bit(AS_EIO, &page->mapping->flags); 2986 if (buffer_busy(bh)) 2987 goto failed; 2988 bh = bh->b_this_page; 2989 } while (bh != head); 2990 2991 do { 2992 struct buffer_head *next = bh->b_this_page; 2993 2994 if (!list_empty(&bh->b_assoc_buffers)) 2995 __remove_assoc_queue(bh); 2996 bh = next; 2997 } while (bh != head); 2998 *buffers_to_free = head; 2999 __clear_page_buffers(page); 3000 return 1; 3001 failed: 3002 return 0; 3003 } 3004 3005 int try_to_free_buffers(struct page *page) 3006 { 3007 struct address_space * const mapping = page->mapping; 3008 struct buffer_head *buffers_to_free = NULL; 3009 int ret = 0; 3010 3011 BUG_ON(!PageLocked(page)); 3012 if (PageWriteback(page)) 3013 return 0; 3014 3015 if (mapping == NULL) { /* can this still happen? */ 3016 ret = drop_buffers(page, &buffers_to_free); 3017 goto out; 3018 } 3019 3020 spin_lock(&mapping->private_lock); 3021 ret = drop_buffers(page, &buffers_to_free); 3022 3023 /* 3024 * If the filesystem writes its buffers by hand (eg ext3) 3025 * then we can have clean buffers against a dirty page. We 3026 * clean the page here; otherwise the VM will never notice 3027 * that the filesystem did any IO at all. 3028 * 3029 * Also, during truncate, discard_buffer will have marked all 3030 * the page's buffers clean. We discover that here and clean 3031 * the page also. 3032 * 3033 * private_lock must be held over this entire operation in order 3034 * to synchronise against __set_page_dirty_buffers and prevent the 3035 * dirty bit from being lost. 
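 *
 * If drop_buffers() succeeded, any dirty bit left on the page is bogus
 * (all of its buffers were clean), so cancel_dirty_page() below clears
 * it and undoes the dirty-page accounting.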
3036 */ 3037 if (ret) 3038 cancel_dirty_page(page, PAGE_CACHE_SIZE); 3039 spin_unlock(&mapping->private_lock); 3040 out: 3041 if (buffers_to_free) { 3042 struct buffer_head *bh = buffers_to_free; 3043 3044 do { 3045 struct buffer_head *next = bh->b_this_page; 3046 free_buffer_head(bh); 3047 bh = next; 3048 } while (bh != buffers_to_free); 3049 } 3050 return ret; 3051 } 3052 EXPORT_SYMBOL(try_to_free_buffers); 3053 3054 void block_sync_page(struct page *page) 3055 { 3056 struct address_space *mapping; 3057 3058 smp_mb(); 3059 mapping = page_mapping(page); 3060 if (mapping) 3061 blk_run_backing_dev(mapping->backing_dev_info, page); 3062 } 3063 3064 /* 3065 * There are no bdflush tunables left. But distributions are 3066 * still running obsolete flush daemons, so we terminate them here. 3067 * 3068 * Use of bdflush() is deprecated and will be removed in a future kernel. 3069 * The `pdflush' kernel threads fully replace bdflush daemons and this call. 3070 */ 3071 asmlinkage long sys_bdflush(int func, long data) 3072 { 3073 static int msg_count; 3074 3075 if (!capable(CAP_SYS_ADMIN)) 3076 return -EPERM; 3077 3078 if (msg_count < 5) { 3079 msg_count++; 3080 printk(KERN_INFO 3081 "warning: process `%s' used the obsolete bdflush" 3082 " system call\n", current->comm); 3083 printk(KERN_INFO "Fix your initscripts?\n"); 3084 } 3085 3086 if (func == 1) 3087 do_exit(0); 3088 return 0; 3089 } 3090 3091 /* 3092 * Buffer-head allocation 3093 */ 3094 static struct kmem_cache *bh_cachep; 3095 3096 /* 3097 * Once the number of bh's in the machine exceeds this level, we start 3098 * stripping them in writeback. 3099 */ 3100 static int max_buffer_heads; 3101 3102 int buffer_heads_over_limit; 3103 3104 struct bh_accounting { 3105 int nr; /* Number of live bh's */ 3106 int ratelimit; /* Limit cacheline bouncing */ 3107 }; 3108 3109 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3110 3111 static void recalc_bh_state(void) 3112 { 3113 int i; 3114 int tot = 0; 3115 3116 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) 3117 return; 3118 __get_cpu_var(bh_accounting).ratelimit = 0; 3119 for_each_online_cpu(i) 3120 tot += per_cpu(bh_accounting, i).nr; 3121 buffer_heads_over_limit = (tot > max_buffer_heads); 3122 } 3123 3124 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3125 { 3126 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 3127 if (ret) { 3128 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3129 get_cpu_var(bh_accounting).nr++; 3130 recalc_bh_state(); 3131 put_cpu_var(bh_accounting); 3132 } 3133 return ret; 3134 } 3135 EXPORT_SYMBOL(alloc_buffer_head); 3136 3137 void free_buffer_head(struct buffer_head *bh) 3138 { 3139 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3140 kmem_cache_free(bh_cachep, bh); 3141 get_cpu_var(bh_accounting).nr--; 3142 recalc_bh_state(); 3143 put_cpu_var(bh_accounting); 3144 } 3145 EXPORT_SYMBOL(free_buffer_head); 3146 3147 static void buffer_exit_cpu(int cpu) 3148 { 3149 int i; 3150 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3151 3152 for (i = 0; i < BH_LRU_SIZE; i++) { 3153 brelse(b->bhs[i]); 3154 b->bhs[i] = NULL; 3155 } 3156 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr; 3157 per_cpu(bh_accounting, cpu).nr = 0; 3158 put_cpu_var(bh_accounting); 3159 } 3160 3161 static int buffer_cpu_notify(struct notifier_block *self, 3162 unsigned long action, void *hcpu) 3163 { 3164 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) 3165 buffer_exit_cpu((unsigned long)hcpu); 3166 return NOTIFY_OK; 3167 } 3168 3169 void __init 
buffer_init(void) 3170 { 3171 int nrpages; 3172 3173 bh_cachep = KMEM_CACHE(buffer_head, 3174 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3175 3176 /* 3177 * Limit the bh occupancy to 10% of ZONE_NORMAL 3178 */ 3179 nrpages = (nr_free_buffer_pages() * 10) / 100; 3180 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 3181 hotcpu_notifier(buffer_cpu_notify, 0); 3182 } 3183 3184 EXPORT_SYMBOL(__bforget); 3185 EXPORT_SYMBOL(__brelse); 3186 EXPORT_SYMBOL(__wait_on_buffer); 3187 EXPORT_SYMBOL(block_commit_write); 3188 EXPORT_SYMBOL(block_prepare_write); 3189 EXPORT_SYMBOL(block_page_mkwrite); 3190 EXPORT_SYMBOL(block_read_full_page); 3191 EXPORT_SYMBOL(block_sync_page); 3192 EXPORT_SYMBOL(block_truncate_page); 3193 EXPORT_SYMBOL(block_write_full_page); 3194 EXPORT_SYMBOL(cont_prepare_write); 3195 EXPORT_SYMBOL(end_buffer_read_sync); 3196 EXPORT_SYMBOL(end_buffer_write_sync); 3197 EXPORT_SYMBOL(file_fsync); 3198 EXPORT_SYMBOL(fsync_bdev); 3199 EXPORT_SYMBOL(generic_block_bmap); 3200 EXPORT_SYMBOL(generic_commit_write); 3201 EXPORT_SYMBOL(generic_cont_expand); 3202 EXPORT_SYMBOL(generic_cont_expand_simple); 3203 EXPORT_SYMBOL(init_buffer); 3204 EXPORT_SYMBOL(invalidate_bdev); 3205 EXPORT_SYMBOL(ll_rw_block); 3206 EXPORT_SYMBOL(mark_buffer_dirty); 3207 EXPORT_SYMBOL(submit_bh); 3208 EXPORT_SYMBOL(sync_dirty_buffer); 3209 EXPORT_SYMBOL(unlock_buffer); 3210