1 /* 2 * linux/fs/buffer.c 3 * 4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds 5 */ 6 7 /* 8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95 9 * 10 * Removed a lot of unnecessary code and simplified things now that 11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96 12 * 13 * Speed up hash, lru, and free list operations. Use gfp() for allocating 14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM 15 * 16 * Added 32k buffer block sizes - these are required older ARM systems. - RMK 17 * 18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de> 19 */ 20 21 #include <linux/config.h> 22 #include <linux/kernel.h> 23 #include <linux/syscalls.h> 24 #include <linux/fs.h> 25 #include <linux/mm.h> 26 #include <linux/percpu.h> 27 #include <linux/slab.h> 28 #include <linux/smp_lock.h> 29 #include <linux/blkdev.h> 30 #include <linux/file.h> 31 #include <linux/quotaops.h> 32 #include <linux/highmem.h> 33 #include <linux/module.h> 34 #include <linux/writeback.h> 35 #include <linux/hash.h> 36 #include <linux/suspend.h> 37 #include <linux/buffer_head.h> 38 #include <linux/bio.h> 39 #include <linux/notifier.h> 40 #include <linux/cpu.h> 41 #include <linux/bitops.h> 42 #include <linux/mpage.h> 43 #include <linux/bit_spinlock.h> 44 45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); 46 static void invalidate_bh_lrus(void); 47 48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) 49 50 inline void 51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) 52 { 53 bh->b_end_io = handler; 54 bh->b_private = private; 55 } 56 57 static int sync_buffer(void *word) 58 { 59 struct block_device *bd; 60 struct buffer_head *bh 61 = container_of(word, struct buffer_head, b_state); 62 63 smp_mb(); 64 bd = bh->b_bdev; 65 if (bd) 66 blk_run_address_space(bd->bd_inode->i_mapping); 67 io_schedule(); 68 return 0; 69 } 70 71 void fastcall __lock_buffer(struct buffer_head *bh) 72 { 73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, 74 TASK_UNINTERRUPTIBLE); 75 } 76 EXPORT_SYMBOL(__lock_buffer); 77 78 void fastcall unlock_buffer(struct buffer_head *bh) 79 { 80 clear_buffer_locked(bh); 81 smp_mb__after_clear_bit(); 82 wake_up_bit(&bh->b_state, BH_Lock); 83 } 84 85 /* 86 * Block until a buffer comes unlocked. This doesn't stop it 87 * from becoming locked again - you have to lock it yourself 88 * if you want to preserve its state. 89 */ 90 void __wait_on_buffer(struct buffer_head * bh) 91 { 92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); 93 } 94 95 static void 96 __clear_page_buffers(struct page *page) 97 { 98 ClearPagePrivate(page); 99 set_page_private(page, 0); 100 page_cache_release(page); 101 } 102 103 static void buffer_io_error(struct buffer_head *bh) 104 { 105 char b[BDEVNAME_SIZE]; 106 107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n", 108 bdevname(bh->b_bdev, b), 109 (unsigned long long)bh->b_blocknr); 110 } 111 112 /* 113 * Default synchronous end-of-IO handler.. Just mark it up-to-date and 114 * unlock the buffer. This is what ll_rw_block uses too. 115 */ 116 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) 117 { 118 if (uptodate) { 119 set_buffer_uptodate(bh); 120 } else { 121 /* This happens, due to failed READA attempts. 
*/ 122 clear_buffer_uptodate(bh); 123 } 124 unlock_buffer(bh); 125 put_bh(bh); 126 } 127 128 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) 129 { 130 char b[BDEVNAME_SIZE]; 131 132 if (uptodate) { 133 set_buffer_uptodate(bh); 134 } else { 135 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) { 136 buffer_io_error(bh); 137 printk(KERN_WARNING "lost page write due to " 138 "I/O error on %s\n", 139 bdevname(bh->b_bdev, b)); 140 } 141 set_buffer_write_io_error(bh); 142 clear_buffer_uptodate(bh); 143 } 144 unlock_buffer(bh); 145 put_bh(bh); 146 } 147 148 /* 149 * Write out and wait upon all the dirty data associated with a block 150 * device via its mapping. Does not take the superblock lock. 151 */ 152 int sync_blockdev(struct block_device *bdev) 153 { 154 int ret = 0; 155 156 if (bdev) { 157 int err; 158 159 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping); 160 err = filemap_fdatawait(bdev->bd_inode->i_mapping); 161 if (!ret) 162 ret = err; 163 } 164 return ret; 165 } 166 EXPORT_SYMBOL(sync_blockdev); 167 168 /* 169 * Write out and wait upon all dirty data associated with this 170 * superblock. Filesystem data as well as the underlying block 171 * device. Takes the superblock lock. 172 */ 173 int fsync_super(struct super_block *sb) 174 { 175 sync_inodes_sb(sb, 0); 176 DQUOT_SYNC(sb); 177 lock_super(sb); 178 if (sb->s_dirt && sb->s_op->write_super) 179 sb->s_op->write_super(sb); 180 unlock_super(sb); 181 if (sb->s_op->sync_fs) 182 sb->s_op->sync_fs(sb, 1); 183 sync_blockdev(sb->s_bdev); 184 sync_inodes_sb(sb, 1); 185 186 return sync_blockdev(sb->s_bdev); 187 } 188 189 /* 190 * Write out and wait upon all dirty data associated with this 191 * device. Filesystem data as well as the underlying block 192 * device. Takes the superblock lock. 193 */ 194 int fsync_bdev(struct block_device *bdev) 195 { 196 struct super_block *sb = get_super(bdev); 197 if (sb) { 198 int res = fsync_super(sb); 199 drop_super(sb); 200 return res; 201 } 202 return sync_blockdev(bdev); 203 } 204 205 /** 206 * freeze_bdev -- lock a filesystem and force it into a consistent state 207 * @bdev: blockdevice to lock 208 * 209 * This takes the block device bd_mount_sem to make sure no new mounts 210 * happen on bdev until thaw_bdev() is called. 211 * If a superblock is found on this device, we take the s_umount semaphore 212 * on it to make sure nobody unmounts until the snapshot creation is done. 213 */ 214 struct super_block *freeze_bdev(struct block_device *bdev) 215 { 216 struct super_block *sb; 217 218 down(&bdev->bd_mount_sem); 219 sb = get_super(bdev); 220 if (sb && !(sb->s_flags & MS_RDONLY)) { 221 sb->s_frozen = SB_FREEZE_WRITE; 222 smp_wmb(); 223 224 sync_inodes_sb(sb, 0); 225 DQUOT_SYNC(sb); 226 227 lock_super(sb); 228 if (sb->s_dirt && sb->s_op->write_super) 229 sb->s_op->write_super(sb); 230 unlock_super(sb); 231 232 if (sb->s_op->sync_fs) 233 sb->s_op->sync_fs(sb, 1); 234 235 sync_blockdev(sb->s_bdev); 236 sync_inodes_sb(sb, 1); 237 238 sb->s_frozen = SB_FREEZE_TRANS; 239 smp_wmb(); 240 241 sync_blockdev(sb->s_bdev); 242 243 if (sb->s_op->write_super_lockfs) 244 sb->s_op->write_super_lockfs(sb); 245 } 246 247 sync_blockdev(bdev); 248 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */ 249 } 250 EXPORT_SYMBOL(freeze_bdev); 251 252 /** 253 * thaw_bdev -- unlock filesystem 254 * @bdev: blockdevice to unlock 255 * @sb: associated superblock 256 * 257 * Unlocks the filesystem and marks it writeable again after freeze_bdev(). 
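 *
 * A minimal usage sketch (an assumed snapshot-style caller, error handling
 * omitted) pairs it with freeze_bdev():
 *
 *      struct super_block *sb = freeze_bdev(bdev);
 *      ... take the snapshot while new writes are held off ...
 *      thaw_bdev(bdev, sb);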
258 */ 259 void thaw_bdev(struct block_device *bdev, struct super_block *sb) 260 { 261 if (sb) { 262 BUG_ON(sb->s_bdev != bdev); 263 264 if (sb->s_op->unlockfs) 265 sb->s_op->unlockfs(sb); 266 sb->s_frozen = SB_UNFROZEN; 267 smp_wmb(); 268 wake_up(&sb->s_wait_unfrozen); 269 drop_super(sb); 270 } 271 272 up(&bdev->bd_mount_sem); 273 } 274 EXPORT_SYMBOL(thaw_bdev); 275 276 /* 277 * sync everything. Start out by waking pdflush, because that writes back 278 * all queues in parallel. 279 */ 280 static void do_sync(unsigned long wait) 281 { 282 wakeup_pdflush(0); 283 sync_inodes(0); /* All mappings, inodes and their blockdevs */ 284 DQUOT_SYNC(NULL); 285 sync_supers(); /* Write the superblocks */ 286 sync_filesystems(0); /* Start syncing the filesystems */ 287 sync_filesystems(wait); /* Waitingly sync the filesystems */ 288 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */ 289 if (!wait) 290 printk("Emergency Sync complete\n"); 291 if (unlikely(laptop_mode)) 292 laptop_sync_completion(); 293 } 294 295 asmlinkage long sys_sync(void) 296 { 297 do_sync(1); 298 return 0; 299 } 300 301 void emergency_sync(void) 302 { 303 pdflush_operation(do_sync, 0); 304 } 305 306 /* 307 * Generic function to fsync a file. 308 * 309 * filp may be NULL if called via the msync of a vma. 310 */ 311 312 int file_fsync(struct file *filp, struct dentry *dentry, int datasync) 313 { 314 struct inode * inode = dentry->d_inode; 315 struct super_block * sb; 316 int ret, err; 317 318 /* sync the inode to buffers */ 319 ret = write_inode_now(inode, 0); 320 321 /* sync the superblock to buffers */ 322 sb = inode->i_sb; 323 lock_super(sb); 324 if (sb->s_op->write_super) 325 sb->s_op->write_super(sb); 326 unlock_super(sb); 327 328 /* .. finally sync the buffers to disk */ 329 err = sync_blockdev(sb->s_bdev); 330 if (!ret) 331 ret = err; 332 return ret; 333 } 334 335 static long do_fsync(unsigned int fd, int datasync) 336 { 337 struct file * file; 338 struct address_space *mapping; 339 int ret, err; 340 341 ret = -EBADF; 342 file = fget(fd); 343 if (!file) 344 goto out; 345 346 ret = -EINVAL; 347 if (!file->f_op || !file->f_op->fsync) { 348 /* Why? We can still call filemap_fdatawrite */ 349 goto out_putf; 350 } 351 352 mapping = file->f_mapping; 353 354 current->flags |= PF_SYNCWRITE; 355 ret = filemap_fdatawrite(mapping); 356 357 /* 358 * We need to protect against concurrent writers, 359 * which could cause livelocks in fsync_buffers_list 360 */ 361 down(&mapping->host->i_sem); 362 err = file->f_op->fsync(file, file->f_dentry, datasync); 363 if (!ret) 364 ret = err; 365 up(&mapping->host->i_sem); 366 err = filemap_fdatawait(mapping); 367 if (!ret) 368 ret = err; 369 current->flags &= ~PF_SYNCWRITE; 370 371 out_putf: 372 fput(file); 373 out: 374 return ret; 375 } 376 377 asmlinkage long sys_fsync(unsigned int fd) 378 { 379 return do_fsync(fd, 0); 380 } 381 382 asmlinkage long sys_fdatasync(unsigned int fd) 383 { 384 return do_fsync(fd, 1); 385 } 386 387 /* 388 * Various filesystems appear to want __find_get_block to be non-blocking. 389 * But it's the page lock which protects the buffers. To get around this, 390 * we get exclusion from try_to_free_buffers with the blockdev mapping's 391 * private_lock. 392 * 393 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention 394 * may be quite high. This code could TryLock the page, and if that 395 * succeeds, there is no need to take private_lock. (But if 396 * private_lock is contended then so is mapping->tree_lock). 
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block, (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() trashes dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: if the user removed a removable-media disk while there was still
   dirty data not synced to disk (due to a bug in the device driver or to
   an error by the user), then by not destroying the dirty buffers we could
   also corrupt the next media inserted.  A parameter is therefore needed
   to handle this case as safely as possible (trying not to corrupt the
   newly inserted disk with data belonging to the old, now corrupted, disk).
   Also, for a ramdisk the natural way to release its memory is to destroy
   the dirty buffers.

   These are the two special cases.  Normal usage is for the device driver
   to issue a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache, the 'update'
   case has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive, so we can only do it
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update; the
   update == 2 pass does the actual I/O.
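
   As a rough sketch of the normal, non-destructive usage described above
   (illustrative only, not code from this file), a driver would write back
   what it can and then invalidate:

        sync_blockdev(bdev);
        invalidate_bdev(bdev, 0);

   The destroy_dirty_buffers argument exists for the removable-media case,
   where dirty buffers must be dropped rather than preserved.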
*/ 482 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) 483 { 484 invalidate_bh_lrus(); 485 /* 486 * FIXME: what about destroy_dirty_buffers? 487 * We really want to use invalidate_inode_pages2() for 488 * that, but not until that's cleaned up. 489 */ 490 invalidate_inode_pages(bdev->bd_inode->i_mapping); 491 } 492 493 /* 494 * Kick pdflush then try to free up some ZONE_NORMAL memory. 495 */ 496 static void free_more_memory(void) 497 { 498 struct zone **zones; 499 pg_data_t *pgdat; 500 501 wakeup_pdflush(1024); 502 yield(); 503 504 for_each_pgdat(pgdat) { 505 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones; 506 if (*zones) 507 try_to_free_pages(zones, GFP_NOFS); 508 } 509 } 510 511 /* 512 * I/O completion handler for block_read_full_page() - pages 513 * which come unlocked at the end of I/O. 514 */ 515 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) 516 { 517 unsigned long flags; 518 struct buffer_head *first; 519 struct buffer_head *tmp; 520 struct page *page; 521 int page_uptodate = 1; 522 523 BUG_ON(!buffer_async_read(bh)); 524 525 page = bh->b_page; 526 if (uptodate) { 527 set_buffer_uptodate(bh); 528 } else { 529 clear_buffer_uptodate(bh); 530 if (printk_ratelimit()) 531 buffer_io_error(bh); 532 SetPageError(page); 533 } 534 535 /* 536 * Be _very_ careful from here on. Bad things can happen if 537 * two buffer heads end IO at almost the same time and both 538 * decide that the page is now completely done. 539 */ 540 first = page_buffers(page); 541 local_irq_save(flags); 542 bit_spin_lock(BH_Uptodate_Lock, &first->b_state); 543 clear_buffer_async_read(bh); 544 unlock_buffer(bh); 545 tmp = bh; 546 do { 547 if (!buffer_uptodate(tmp)) 548 page_uptodate = 0; 549 if (buffer_async_read(tmp)) { 550 BUG_ON(!buffer_locked(tmp)); 551 goto still_busy; 552 } 553 tmp = tmp->b_this_page; 554 } while (tmp != bh); 555 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); 556 local_irq_restore(flags); 557 558 /* 559 * If none of the buffers had errors and they are all 560 * uptodate then we can set the page uptodate. 561 */ 562 if (page_uptodate && !PageError(page)) 563 SetPageUptodate(page); 564 unlock_page(page); 565 return; 566 567 still_busy: 568 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); 569 local_irq_restore(flags); 570 return; 571 } 572 573 /* 574 * Completion handler for block_write_full_page() - pages which are unlocked 575 * during I/O, and which have PageWriteback cleared upon I/O completion. 
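 *
 * Writers arm this handler per buffer before starting the I/O, roughly:
 *
 *      lock_buffer(bh);
 *      mark_buffer_async_write(bh);    (points b_end_io at this function)
 *      ...
 *      submit_bh(WRITE, bh);
 *
 * which is the pattern __block_write_full_page() uses below.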
576 */ 577 void end_buffer_async_write(struct buffer_head *bh, int uptodate) 578 { 579 char b[BDEVNAME_SIZE]; 580 unsigned long flags; 581 struct buffer_head *first; 582 struct buffer_head *tmp; 583 struct page *page; 584 585 BUG_ON(!buffer_async_write(bh)); 586 587 page = bh->b_page; 588 if (uptodate) { 589 set_buffer_uptodate(bh); 590 } else { 591 if (printk_ratelimit()) { 592 buffer_io_error(bh); 593 printk(KERN_WARNING "lost page write due to " 594 "I/O error on %s\n", 595 bdevname(bh->b_bdev, b)); 596 } 597 set_bit(AS_EIO, &page->mapping->flags); 598 clear_buffer_uptodate(bh); 599 SetPageError(page); 600 } 601 602 first = page_buffers(page); 603 local_irq_save(flags); 604 bit_spin_lock(BH_Uptodate_Lock, &first->b_state); 605 606 clear_buffer_async_write(bh); 607 unlock_buffer(bh); 608 tmp = bh->b_this_page; 609 while (tmp != bh) { 610 if (buffer_async_write(tmp)) { 611 BUG_ON(!buffer_locked(tmp)); 612 goto still_busy; 613 } 614 tmp = tmp->b_this_page; 615 } 616 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); 617 local_irq_restore(flags); 618 end_page_writeback(page); 619 return; 620 621 still_busy: 622 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); 623 local_irq_restore(flags); 624 return; 625 } 626 627 /* 628 * If a page's buffers are under async readin (end_buffer_async_read 629 * completion) then there is a possibility that another thread of 630 * control could lock one of the buffers after it has completed 631 * but while some of the other buffers have not completed. This 632 * locked buffer would confuse end_buffer_async_read() into not unlocking 633 * the page. So the absence of BH_Async_Read tells end_buffer_async_read() 634 * that this buffer is not under async I/O. 635 * 636 * The page comes unlocked when it has no locked buffer_async buffers 637 * left. 638 * 639 * PageLocked prevents anyone starting new async I/O reads any of 640 * the buffers. 641 * 642 * PageWriteback is used to prevent simultaneous writeout of the same 643 * page. 644 * 645 * PageLocked prevents anyone from starting writeback of a page which is 646 * under read I/O (PageWriteback is only ever set against a locked page). 647 */ 648 static void mark_buffer_async_read(struct buffer_head *bh) 649 { 650 bh->b_end_io = end_buffer_async_read; 651 set_buffer_async_read(bh); 652 } 653 654 void mark_buffer_async_write(struct buffer_head *bh) 655 { 656 bh->b_end_io = end_buffer_async_write; 657 set_buffer_async_write(bh); 658 } 659 EXPORT_SYMBOL(mark_buffer_async_write); 660 661 662 /* 663 * fs/buffer.c contains helper functions for buffer-backed address space's 664 * fsync functions. A common requirement for buffer-based filesystems is 665 * that certain data from the backing blockdev needs to be written out for 666 * a successful fsync(). For example, ext2 indirect blocks need to be 667 * written back and waited upon before fsync() returns. 668 * 669 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(), 670 * inode_has_buffers() and invalidate_inode_buffers() are provided for the 671 * management of a list of dependent buffers at ->i_mapping->private_list. 672 * 673 * Locking is a little subtle: try_to_free_buffers() will remove buffers 674 * from their controlling inode's queue when they are being freed. But 675 * try_to_free_buffers() will be operating against the *blockdev* mapping 676 * at the time, not against the S_ISREG file which depends on those buffers. 677 * So the locking for private_list is via the private_lock in the address_space 678 * which backs the buffers. 
Which is different from the address_space 679 * against which the buffers are listed. So for a particular address_space, 680 * mapping->private_lock does *not* protect mapping->private_list! In fact, 681 * mapping->private_list will always be protected by the backing blockdev's 682 * ->private_lock. 683 * 684 * Which introduces a requirement: all buffers on an address_space's 685 * ->private_list must be from the same address_space: the blockdev's. 686 * 687 * address_spaces which do not place buffers at ->private_list via these 688 * utility functions are free to use private_lock and private_list for 689 * whatever they want. The only requirement is that list_empty(private_list) 690 * be true at clear_inode() time. 691 * 692 * FIXME: clear_inode should not call invalidate_inode_buffers(). The 693 * filesystems should do that. invalidate_inode_buffers() should just go 694 * BUG_ON(!list_empty). 695 * 696 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should 697 * take an address_space, not an inode. And it should be called 698 * mark_buffer_dirty_fsync() to clearly define why those buffers are being 699 * queued up. 700 * 701 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the 702 * list if it is already on a list. Because if the buffer is on a list, 703 * it *must* already be on the right one. If not, the filesystem is being 704 * silly. This will save a ton of locking. But first we have to ensure 705 * that buffers are taken *off* the old inode's list when they are freed 706 * (presumably in truncate). That requires careful auditing of all 707 * filesystems (do it inside bforget()). It could also be done by bringing 708 * b_inode back. 709 */ 710 711 /* 712 * The buffer's backing address_space's private_lock must be held 713 */ 714 static inline void __remove_assoc_queue(struct buffer_head *bh) 715 { 716 list_del_init(&bh->b_assoc_buffers); 717 } 718 719 int inode_has_buffers(struct inode *inode) 720 { 721 return !list_empty(&inode->i_data.private_list); 722 } 723 724 /* 725 * osync is designed to support O_SYNC io. It waits synchronously for 726 * all already-submitted IO to complete, but does not queue any new 727 * writes to the disk. 728 * 729 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as 730 * you dirty the buffers, and then use osync_inode_buffers to wait for 731 * completion. Any other dirty buffers which are not yet queued for 732 * write will not be flushed to disk by the osync. 733 */ 734 static int osync_buffers_list(spinlock_t *lock, struct list_head *list) 735 { 736 struct buffer_head *bh; 737 struct list_head *p; 738 int err = 0; 739 740 spin_lock(lock); 741 repeat: 742 list_for_each_prev(p, list) { 743 bh = BH_ENTRY(p); 744 if (buffer_locked(bh)) { 745 get_bh(bh); 746 spin_unlock(lock); 747 wait_on_buffer(bh); 748 if (!buffer_uptodate(bh)) 749 err = -EIO; 750 brelse(bh); 751 spin_lock(lock); 752 goto repeat; 753 } 754 } 755 spin_unlock(lock); 756 return err; 757 } 758 759 /** 760 * sync_mapping_buffers - write out and wait upon a mapping's "associated" 761 * buffers 762 * @mapping: the mapping which wants those buffers written 763 * 764 * Starts I/O against the buffers at mapping->private_list, and waits upon 765 * that I/O. 766 * 767 * Basically, this is a convenience function for fsync(). 768 * @mapping is a file or directory which needs those buffers to be written for 769 * a successful fsync(). 
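 *
 * A filesystem's ->fsync() typically calls it first, along these lines
 * (sketch only; myfs_fsync is a hypothetical method):
 *
 *      int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
 *      {
 *              struct inode *inode = dentry->d_inode;
 *              int err = sync_mapping_buffers(inode->i_mapping);
 *
 *              ... then write the inode itself if it is dirty ...
 *              return err;
 *      }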
770 */ 771 int sync_mapping_buffers(struct address_space *mapping) 772 { 773 struct address_space *buffer_mapping = mapping->assoc_mapping; 774 775 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) 776 return 0; 777 778 return fsync_buffers_list(&buffer_mapping->private_lock, 779 &mapping->private_list); 780 } 781 EXPORT_SYMBOL(sync_mapping_buffers); 782 783 /* 784 * Called when we've recently written block `bblock', and it is known that 785 * `bblock' was for a buffer_boundary() buffer. This means that the block at 786 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's 787 * dirty, schedule it for IO. So that indirects merge nicely with their data. 788 */ 789 void write_boundary_block(struct block_device *bdev, 790 sector_t bblock, unsigned blocksize) 791 { 792 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); 793 if (bh) { 794 if (buffer_dirty(bh)) 795 ll_rw_block(WRITE, 1, &bh); 796 put_bh(bh); 797 } 798 } 799 800 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) 801 { 802 struct address_space *mapping = inode->i_mapping; 803 struct address_space *buffer_mapping = bh->b_page->mapping; 804 805 mark_buffer_dirty(bh); 806 if (!mapping->assoc_mapping) { 807 mapping->assoc_mapping = buffer_mapping; 808 } else { 809 if (mapping->assoc_mapping != buffer_mapping) 810 BUG(); 811 } 812 if (list_empty(&bh->b_assoc_buffers)) { 813 spin_lock(&buffer_mapping->private_lock); 814 list_move_tail(&bh->b_assoc_buffers, 815 &mapping->private_list); 816 spin_unlock(&buffer_mapping->private_lock); 817 } 818 } 819 EXPORT_SYMBOL(mark_buffer_dirty_inode); 820 821 /* 822 * Add a page to the dirty page list. 823 * 824 * It is a sad fact of life that this function is called from several places 825 * deeply under spinlocking. It may not sleep. 826 * 827 * If the page has buffers, the uptodate buffers are set dirty, to preserve 828 * dirty-state coherency between the page and the buffers. It the page does 829 * not have buffers then when they are later attached they will all be set 830 * dirty. 831 * 832 * The buffers are dirtied before the page is dirtied. There's a small race 833 * window in which a writepage caller may see the page cleanness but not the 834 * buffer dirtiness. That's fine. If this code were to set the page dirty 835 * before the buffers, a concurrent writepage caller could clear the page dirty 836 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean 837 * page on the dirty page list. 838 * 839 * We use private_lock to lock against try_to_free_buffers while using the 840 * page's buffer list. Also use this to protect against clean buffers being 841 * added to the page after it was set dirty. 842 * 843 * FIXME: may need to call ->reservepage here as well. That's rather up to the 844 * address_space though. 845 */ 846 int __set_page_dirty_buffers(struct page *page) 847 { 848 struct address_space * const mapping = page->mapping; 849 850 spin_lock(&mapping->private_lock); 851 if (page_has_buffers(page)) { 852 struct buffer_head *head = page_buffers(page); 853 struct buffer_head *bh = head; 854 855 do { 856 set_buffer_dirty(bh); 857 bh = bh->b_this_page; 858 } while (bh != head); 859 } 860 spin_unlock(&mapping->private_lock); 861 862 if (!TestSetPageDirty(page)) { 863 write_lock_irq(&mapping->tree_lock); 864 if (page->mapping) { /* Race with truncate? 
*/ 865 if (mapping_cap_account_dirty(mapping)) 866 inc_page_state(nr_dirty); 867 radix_tree_tag_set(&mapping->page_tree, 868 page_index(page), 869 PAGECACHE_TAG_DIRTY); 870 } 871 write_unlock_irq(&mapping->tree_lock); 872 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 873 } 874 875 return 0; 876 } 877 EXPORT_SYMBOL(__set_page_dirty_buffers); 878 879 /* 880 * Write out and wait upon a list of buffers. 881 * 882 * We have conflicting pressures: we want to make sure that all 883 * initially dirty buffers get waited on, but that any subsequently 884 * dirtied buffers don't. After all, we don't want fsync to last 885 * forever if somebody is actively writing to the file. 886 * 887 * Do this in two main stages: first we copy dirty buffers to a 888 * temporary inode list, queueing the writes as we go. Then we clean 889 * up, waiting for those writes to complete. 890 * 891 * During this second stage, any subsequent updates to the file may end 892 * up refiling the buffer on the original inode's dirty list again, so 893 * there is a chance we will end up with a buffer queued for write but 894 * not yet completed on that list. So, as a final cleanup we go through 895 * the osync code to catch these locked, dirty buffers without requeuing 896 * any newly dirty buffers for write. 897 */ 898 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) 899 { 900 struct buffer_head *bh; 901 struct list_head tmp; 902 int err = 0, err2; 903 904 INIT_LIST_HEAD(&tmp); 905 906 spin_lock(lock); 907 while (!list_empty(list)) { 908 bh = BH_ENTRY(list->next); 909 list_del_init(&bh->b_assoc_buffers); 910 if (buffer_dirty(bh) || buffer_locked(bh)) { 911 list_add(&bh->b_assoc_buffers, &tmp); 912 if (buffer_dirty(bh)) { 913 get_bh(bh); 914 spin_unlock(lock); 915 /* 916 * Ensure any pending I/O completes so that 917 * ll_rw_block() actually writes the current 918 * contents - it is a noop if I/O is still in 919 * flight on potentially older contents. 920 */ 921 ll_rw_block(SWRITE, 1, &bh); 922 brelse(bh); 923 spin_lock(lock); 924 } 925 } 926 } 927 928 while (!list_empty(&tmp)) { 929 bh = BH_ENTRY(tmp.prev); 930 __remove_assoc_queue(bh); 931 get_bh(bh); 932 spin_unlock(lock); 933 wait_on_buffer(bh); 934 if (!buffer_uptodate(bh)) 935 err = -EIO; 936 brelse(bh); 937 spin_lock(lock); 938 } 939 940 spin_unlock(lock); 941 err2 = osync_buffers_list(lock, list); 942 if (err) 943 return err; 944 else 945 return err2; 946 } 947 948 /* 949 * Invalidate any and all dirty buffers on a given inode. We are 950 * probably unmounting the fs, but that doesn't mean we have already 951 * done a sync(). Just drop the buffers from the inode list. 952 * 953 * NOTE: we take the inode's blockdev's mapping's private_lock. Which 954 * assumes that all the buffers are against the blockdev. Not true 955 * for reiserfs. 956 */ 957 void invalidate_inode_buffers(struct inode *inode) 958 { 959 if (inode_has_buffers(inode)) { 960 struct address_space *mapping = &inode->i_data; 961 struct list_head *list = &mapping->private_list; 962 struct address_space *buffer_mapping = mapping->assoc_mapping; 963 964 spin_lock(&buffer_mapping->private_lock); 965 while (!list_empty(list)) 966 __remove_assoc_queue(BH_ENTRY(list->next)); 967 spin_unlock(&buffer_mapping->private_lock); 968 } 969 } 970 971 /* 972 * Remove any clean buffers from the inode's buffer list. This is called 973 * when we're trying to free the inode itself. Those buffers can pin it. 974 * 975 * Returns true if all buffers were removed. 
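 *
 * A caller deciding whether the inode can really be evicted might use the
 * return value like this (an illustrative sketch, not the actual caller):
 *
 *      if (!remove_inode_buffers(inode))
 *              ... dirty buffers remain, so the inode is still pinned ...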
976 */ 977 int remove_inode_buffers(struct inode *inode) 978 { 979 int ret = 1; 980 981 if (inode_has_buffers(inode)) { 982 struct address_space *mapping = &inode->i_data; 983 struct list_head *list = &mapping->private_list; 984 struct address_space *buffer_mapping = mapping->assoc_mapping; 985 986 spin_lock(&buffer_mapping->private_lock); 987 while (!list_empty(list)) { 988 struct buffer_head *bh = BH_ENTRY(list->next); 989 if (buffer_dirty(bh)) { 990 ret = 0; 991 break; 992 } 993 __remove_assoc_queue(bh); 994 } 995 spin_unlock(&buffer_mapping->private_lock); 996 } 997 return ret; 998 } 999 1000 /* 1001 * Create the appropriate buffers when given a page for data area and 1002 * the size of each buffer.. Use the bh->b_this_page linked list to 1003 * follow the buffers created. Return NULL if unable to create more 1004 * buffers. 1005 * 1006 * The retry flag is used to differentiate async IO (paging, swapping) 1007 * which may not fail from ordinary buffer allocations. 1008 */ 1009 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, 1010 int retry) 1011 { 1012 struct buffer_head *bh, *head; 1013 long offset; 1014 1015 try_again: 1016 head = NULL; 1017 offset = PAGE_SIZE; 1018 while ((offset -= size) >= 0) { 1019 bh = alloc_buffer_head(GFP_NOFS); 1020 if (!bh) 1021 goto no_grow; 1022 1023 bh->b_bdev = NULL; 1024 bh->b_this_page = head; 1025 bh->b_blocknr = -1; 1026 head = bh; 1027 1028 bh->b_state = 0; 1029 atomic_set(&bh->b_count, 0); 1030 bh->b_size = size; 1031 1032 /* Link the buffer to its page */ 1033 set_bh_page(bh, page, offset); 1034 1035 bh->b_end_io = NULL; 1036 } 1037 return head; 1038 /* 1039 * In case anything failed, we just free everything we got. 1040 */ 1041 no_grow: 1042 if (head) { 1043 do { 1044 bh = head; 1045 head = head->b_this_page; 1046 free_buffer_head(bh); 1047 } while (head); 1048 } 1049 1050 /* 1051 * Return failure for non-async IO requests. Async IO requests 1052 * are not allowed to fail, so we have to wait until buffer heads 1053 * become available. But we don't want tasks sleeping with 1054 * partially complete buffers, so all were released above. 1055 */ 1056 if (!retry) 1057 return NULL; 1058 1059 /* We're _really_ low on memory. Now we just 1060 * wait for old buffer heads to become free due to 1061 * finishing IO. Since this is an async request and 1062 * the reserve list is empty, we're sure there are 1063 * async buffer heads in use. 1064 */ 1065 free_more_memory(); 1066 goto try_again; 1067 } 1068 EXPORT_SYMBOL_GPL(alloc_page_buffers); 1069 1070 static inline void 1071 link_dev_buffers(struct page *page, struct buffer_head *head) 1072 { 1073 struct buffer_head *bh, *tail; 1074 1075 bh = head; 1076 do { 1077 tail = bh; 1078 bh = bh->b_this_page; 1079 } while (bh); 1080 tail->b_this_page = head; 1081 attach_page_buffers(page, head); 1082 } 1083 1084 /* 1085 * Initialise the state of a blockdev page's buffers. 
1086 */ 1087 static void 1088 init_page_buffers(struct page *page, struct block_device *bdev, 1089 sector_t block, int size) 1090 { 1091 struct buffer_head *head = page_buffers(page); 1092 struct buffer_head *bh = head; 1093 int uptodate = PageUptodate(page); 1094 1095 do { 1096 if (!buffer_mapped(bh)) { 1097 init_buffer(bh, NULL, NULL); 1098 bh->b_bdev = bdev; 1099 bh->b_blocknr = block; 1100 if (uptodate) 1101 set_buffer_uptodate(bh); 1102 set_buffer_mapped(bh); 1103 } 1104 block++; 1105 bh = bh->b_this_page; 1106 } while (bh != head); 1107 } 1108 1109 /* 1110 * Create the page-cache page that contains the requested block. 1111 * 1112 * This is user purely for blockdev mappings. 1113 */ 1114 static struct page * 1115 grow_dev_page(struct block_device *bdev, sector_t block, 1116 pgoff_t index, int size) 1117 { 1118 struct inode *inode = bdev->bd_inode; 1119 struct page *page; 1120 struct buffer_head *bh; 1121 1122 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 1123 if (!page) 1124 return NULL; 1125 1126 if (!PageLocked(page)) 1127 BUG(); 1128 1129 if (page_has_buffers(page)) { 1130 bh = page_buffers(page); 1131 if (bh->b_size == size) { 1132 init_page_buffers(page, bdev, block, size); 1133 return page; 1134 } 1135 if (!try_to_free_buffers(page)) 1136 goto failed; 1137 } 1138 1139 /* 1140 * Allocate some buffers for this page 1141 */ 1142 bh = alloc_page_buffers(page, size, 0); 1143 if (!bh) 1144 goto failed; 1145 1146 /* 1147 * Link the page to the buffers and initialise them. Take the 1148 * lock to be atomic wrt __find_get_block(), which does not 1149 * run under the page lock. 1150 */ 1151 spin_lock(&inode->i_mapping->private_lock); 1152 link_dev_buffers(page, bh); 1153 init_page_buffers(page, bdev, block, size); 1154 spin_unlock(&inode->i_mapping->private_lock); 1155 return page; 1156 1157 failed: 1158 BUG(); 1159 unlock_page(page); 1160 page_cache_release(page); 1161 return NULL; 1162 } 1163 1164 /* 1165 * Create buffers for the specified block device block's page. If 1166 * that page was dirty, the buffers are set dirty also. 1167 * 1168 * Except that's a bug. Attaching dirty buffers to a dirty 1169 * blockdev's page can result in filesystem corruption, because 1170 * some of those buffers may be aliases of filesystem data. 1171 * grow_dev_page() will go BUG() if this happens. 1172 */ 1173 static inline int 1174 grow_buffers(struct block_device *bdev, sector_t block, int size) 1175 { 1176 struct page *page; 1177 pgoff_t index; 1178 int sizebits; 1179 1180 sizebits = -1; 1181 do { 1182 sizebits++; 1183 } while ((size << sizebits) < PAGE_SIZE); 1184 1185 index = block >> sizebits; 1186 block = index << sizebits; 1187 1188 /* Create a page with the proper size buffers.. 
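 *
 * For example (illustrative numbers only): with 1k buffers on a 4k page,
 * sizebits == 2, so block 1234 maps to page index 1234 >> 2 == 308 and is
 * rounded down to 308 << 2 == 1232, the first of the four buffers that
 * page will cover.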
*/ 1189 page = grow_dev_page(bdev, block, index, size); 1190 if (!page) 1191 return 0; 1192 unlock_page(page); 1193 page_cache_release(page); 1194 return 1; 1195 } 1196 1197 static struct buffer_head * 1198 __getblk_slow(struct block_device *bdev, sector_t block, int size) 1199 { 1200 /* Size must be multiple of hard sectorsize */ 1201 if (unlikely(size & (bdev_hardsect_size(bdev)-1) || 1202 (size < 512 || size > PAGE_SIZE))) { 1203 printk(KERN_ERR "getblk(): invalid block size %d requested\n", 1204 size); 1205 printk(KERN_ERR "hardsect size: %d\n", 1206 bdev_hardsect_size(bdev)); 1207 1208 dump_stack(); 1209 return NULL; 1210 } 1211 1212 for (;;) { 1213 struct buffer_head * bh; 1214 1215 bh = __find_get_block(bdev, block, size); 1216 if (bh) 1217 return bh; 1218 1219 if (!grow_buffers(bdev, block, size)) 1220 free_more_memory(); 1221 } 1222 } 1223 1224 /* 1225 * The relationship between dirty buffers and dirty pages: 1226 * 1227 * Whenever a page has any dirty buffers, the page's dirty bit is set, and 1228 * the page is tagged dirty in its radix tree. 1229 * 1230 * At all times, the dirtiness of the buffers represents the dirtiness of 1231 * subsections of the page. If the page has buffers, the page dirty bit is 1232 * merely a hint about the true dirty state. 1233 * 1234 * When a page is set dirty in its entirety, all its buffers are marked dirty 1235 * (if the page has buffers). 1236 * 1237 * When a buffer is marked dirty, its page is dirtied, but the page's other 1238 * buffers are not. 1239 * 1240 * Also. When blockdev buffers are explicitly read with bread(), they 1241 * individually become uptodate. But their backing page remains not 1242 * uptodate - even if all of its buffers are uptodate. A subsequent 1243 * block_read_full_page() against that page will discover all the uptodate 1244 * buffers, will set the page uptodate and will perform no I/O. 1245 */ 1246 1247 /** 1248 * mark_buffer_dirty - mark a buffer_head as needing writeout 1249 * @bh: the buffer_head to mark dirty 1250 * 1251 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its 1252 * backing page dirty, then tag the page as dirty in its address_space's radix 1253 * tree and then attach the address_space's inode to its superblock's dirty 1254 * inode list. 1255 * 1256 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, 1257 * mapping->tree_lock and the global inode_lock. 1258 */ 1259 void fastcall mark_buffer_dirty(struct buffer_head *bh) 1260 { 1261 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh)) 1262 __set_page_dirty_nobuffers(bh->b_page); 1263 } 1264 1265 /* 1266 * Decrement a buffer_head's reference count. If all buffers against a page 1267 * have zero reference count, are clean and unlocked, and if the page is clean 1268 * and unlocked then try_to_free_buffers() may strip the buffers from the page 1269 * in preparation for freeing it (sometimes, rarely, buffers are removed from 1270 * a page but it ends up not being freed, and buffers may later be reattached). 1271 */ 1272 void __brelse(struct buffer_head * buf) 1273 { 1274 if (atomic_read(&buf->b_count)) { 1275 put_bh(buf); 1276 return; 1277 } 1278 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 1279 WARN_ON(1); 1280 } 1281 1282 /* 1283 * bforget() is like brelse(), except it discards any 1284 * potentially dirty data. 
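 *
 * Typical use is for a buffer whose contents the filesystem has decided to
 * throw away, e.g. a just-freed metadata block (sketch only;
 * block_being_freed is an illustrative name):
 *
 *      bh = sb_bread(sb, block_being_freed);
 *      ... update the allocation data ...
 *      bforget(bh);                    (rather than brelse(bh))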
1285 */ 1286 void __bforget(struct buffer_head *bh) 1287 { 1288 clear_buffer_dirty(bh); 1289 if (!list_empty(&bh->b_assoc_buffers)) { 1290 struct address_space *buffer_mapping = bh->b_page->mapping; 1291 1292 spin_lock(&buffer_mapping->private_lock); 1293 list_del_init(&bh->b_assoc_buffers); 1294 spin_unlock(&buffer_mapping->private_lock); 1295 } 1296 __brelse(bh); 1297 } 1298 1299 static struct buffer_head *__bread_slow(struct buffer_head *bh) 1300 { 1301 lock_buffer(bh); 1302 if (buffer_uptodate(bh)) { 1303 unlock_buffer(bh); 1304 return bh; 1305 } else { 1306 get_bh(bh); 1307 bh->b_end_io = end_buffer_read_sync; 1308 submit_bh(READ, bh); 1309 wait_on_buffer(bh); 1310 if (buffer_uptodate(bh)) 1311 return bh; 1312 } 1313 brelse(bh); 1314 return NULL; 1315 } 1316 1317 /* 1318 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 1319 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 1320 * refcount elevated by one when they're in an LRU. A buffer can only appear 1321 * once in a particular CPU's LRU. A single buffer can be present in multiple 1322 * CPU's LRUs at the same time. 1323 * 1324 * This is a transparent caching front-end to sb_bread(), sb_getblk() and 1325 * sb_find_get_block(). 1326 * 1327 * The LRUs themselves only need locking against invalidate_bh_lrus. We use 1328 * a local interrupt disable for that. 1329 */ 1330 1331 #define BH_LRU_SIZE 8 1332 1333 struct bh_lru { 1334 struct buffer_head *bhs[BH_LRU_SIZE]; 1335 }; 1336 1337 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 1338 1339 #ifdef CONFIG_SMP 1340 #define bh_lru_lock() local_irq_disable() 1341 #define bh_lru_unlock() local_irq_enable() 1342 #else 1343 #define bh_lru_lock() preempt_disable() 1344 #define bh_lru_unlock() preempt_enable() 1345 #endif 1346 1347 static inline void check_irqs_on(void) 1348 { 1349 #ifdef irqs_disabled 1350 BUG_ON(irqs_disabled()); 1351 #endif 1352 } 1353 1354 /* 1355 * The LRU management algorithm is dopey-but-simple. Sorry. 1356 */ 1357 static void bh_lru_install(struct buffer_head *bh) 1358 { 1359 struct buffer_head *evictee = NULL; 1360 struct bh_lru *lru; 1361 1362 check_irqs_on(); 1363 bh_lru_lock(); 1364 lru = &__get_cpu_var(bh_lrus); 1365 if (lru->bhs[0] != bh) { 1366 struct buffer_head *bhs[BH_LRU_SIZE]; 1367 int in; 1368 int out = 0; 1369 1370 get_bh(bh); 1371 bhs[out++] = bh; 1372 for (in = 0; in < BH_LRU_SIZE; in++) { 1373 struct buffer_head *bh2 = lru->bhs[in]; 1374 1375 if (bh2 == bh) { 1376 __brelse(bh2); 1377 } else { 1378 if (out >= BH_LRU_SIZE) { 1379 BUG_ON(evictee != NULL); 1380 evictee = bh2; 1381 } else { 1382 bhs[out++] = bh2; 1383 } 1384 } 1385 } 1386 while (out < BH_LRU_SIZE) 1387 bhs[out++] = NULL; 1388 memcpy(lru->bhs, bhs, sizeof(bhs)); 1389 } 1390 bh_lru_unlock(); 1391 1392 if (evictee) 1393 __brelse(evictee); 1394 } 1395 1396 /* 1397 * Look up the bh in this cpu's LRU. If it's there, move it to the head. 
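 *
 * For example, if this CPU's bhs[] holds { A, B, C, D, ... } and C is the
 * buffer being looked up, the entries above it are shifted down one slot
 * and the result is { C, A, B, D, ... }.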
1398 */ 1399 static inline struct buffer_head * 1400 lookup_bh_lru(struct block_device *bdev, sector_t block, int size) 1401 { 1402 struct buffer_head *ret = NULL; 1403 struct bh_lru *lru; 1404 int i; 1405 1406 check_irqs_on(); 1407 bh_lru_lock(); 1408 lru = &__get_cpu_var(bh_lrus); 1409 for (i = 0; i < BH_LRU_SIZE; i++) { 1410 struct buffer_head *bh = lru->bhs[i]; 1411 1412 if (bh && bh->b_bdev == bdev && 1413 bh->b_blocknr == block && bh->b_size == size) { 1414 if (i) { 1415 while (i) { 1416 lru->bhs[i] = lru->bhs[i - 1]; 1417 i--; 1418 } 1419 lru->bhs[0] = bh; 1420 } 1421 get_bh(bh); 1422 ret = bh; 1423 break; 1424 } 1425 } 1426 bh_lru_unlock(); 1427 return ret; 1428 } 1429 1430 /* 1431 * Perform a pagecache lookup for the matching buffer. If it's there, refresh 1432 * it in the LRU and mark it as accessed. If it is not present then return 1433 * NULL 1434 */ 1435 struct buffer_head * 1436 __find_get_block(struct block_device *bdev, sector_t block, int size) 1437 { 1438 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 1439 1440 if (bh == NULL) { 1441 bh = __find_get_block_slow(bdev, block, size); 1442 if (bh) 1443 bh_lru_install(bh); 1444 } 1445 if (bh) 1446 touch_buffer(bh); 1447 return bh; 1448 } 1449 EXPORT_SYMBOL(__find_get_block); 1450 1451 /* 1452 * __getblk will locate (and, if necessary, create) the buffer_head 1453 * which corresponds to the passed block_device, block and size. The 1454 * returned buffer has its reference count incremented. 1455 * 1456 * __getblk() cannot fail - it just keeps trying. If you pass it an 1457 * illegal block number, __getblk() will happily return a buffer_head 1458 * which represents the non-existent block. Very weird. 1459 * 1460 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() 1461 * attempt is failing. FIXME, perhaps? 1462 */ 1463 struct buffer_head * 1464 __getblk(struct block_device *bdev, sector_t block, int size) 1465 { 1466 struct buffer_head *bh = __find_get_block(bdev, block, size); 1467 1468 might_sleep(); 1469 if (bh == NULL) 1470 bh = __getblk_slow(bdev, block, size); 1471 return bh; 1472 } 1473 EXPORT_SYMBOL(__getblk); 1474 1475 /* 1476 * Do async read-ahead on a buffer.. 1477 */ 1478 void __breadahead(struct block_device *bdev, sector_t block, int size) 1479 { 1480 struct buffer_head *bh = __getblk(bdev, block, size); 1481 if (likely(bh)) { 1482 ll_rw_block(READA, 1, &bh); 1483 brelse(bh); 1484 } 1485 } 1486 EXPORT_SYMBOL(__breadahead); 1487 1488 /** 1489 * __bread() - reads a specified block and returns the bh 1490 * @bdev: the block_device to read from 1491 * @block: number of block 1492 * @size: size (in bytes) to read 1493 * 1494 * Reads a specified block, and returns buffer head that contains it. 1495 * It returns NULL if the block was unreadable. 1496 */ 1497 struct buffer_head * 1498 __bread(struct block_device *bdev, sector_t block, int size) 1499 { 1500 struct buffer_head *bh = __getblk(bdev, block, size); 1501 1502 if (likely(bh) && !buffer_uptodate(bh)) 1503 bh = __bread_slow(bh); 1504 return bh; 1505 } 1506 EXPORT_SYMBOL(__bread); 1507 1508 /* 1509 * invalidate_bh_lrus() is called rarely - but not only at unmount. 1510 * This doesn't race because it runs in each cpu either in irq 1511 * or with preempt disabled. 
1512 */ 1513 static void invalidate_bh_lru(void *arg) 1514 { 1515 struct bh_lru *b = &get_cpu_var(bh_lrus); 1516 int i; 1517 1518 for (i = 0; i < BH_LRU_SIZE; i++) { 1519 brelse(b->bhs[i]); 1520 b->bhs[i] = NULL; 1521 } 1522 put_cpu_var(bh_lrus); 1523 } 1524 1525 static void invalidate_bh_lrus(void) 1526 { 1527 on_each_cpu(invalidate_bh_lru, NULL, 1, 1); 1528 } 1529 1530 void set_bh_page(struct buffer_head *bh, 1531 struct page *page, unsigned long offset) 1532 { 1533 bh->b_page = page; 1534 if (offset >= PAGE_SIZE) 1535 BUG(); 1536 if (PageHighMem(page)) 1537 /* 1538 * This catches illegal uses and preserves the offset: 1539 */ 1540 bh->b_data = (char *)(0 + offset); 1541 else 1542 bh->b_data = page_address(page) + offset; 1543 } 1544 EXPORT_SYMBOL(set_bh_page); 1545 1546 /* 1547 * Called when truncating a buffer on a page completely. 1548 */ 1549 static inline void discard_buffer(struct buffer_head * bh) 1550 { 1551 lock_buffer(bh); 1552 clear_buffer_dirty(bh); 1553 bh->b_bdev = NULL; 1554 clear_buffer_mapped(bh); 1555 clear_buffer_req(bh); 1556 clear_buffer_new(bh); 1557 clear_buffer_delay(bh); 1558 unlock_buffer(bh); 1559 } 1560 1561 /** 1562 * try_to_release_page() - release old fs-specific metadata on a page 1563 * 1564 * @page: the page which the kernel is trying to free 1565 * @gfp_mask: memory allocation flags (and I/O mode) 1566 * 1567 * The address_space is to try to release any data against the page 1568 * (presumably at page->private). If the release was successful, return `1'. 1569 * Otherwise return zero. 1570 * 1571 * The @gfp_mask argument specifies whether I/O may be performed to release 1572 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT). 1573 * 1574 * NOTE: @gfp_mask may go away, and this function may become non-blocking. 1575 */ 1576 int try_to_release_page(struct page *page, gfp_t gfp_mask) 1577 { 1578 struct address_space * const mapping = page->mapping; 1579 1580 BUG_ON(!PageLocked(page)); 1581 if (PageWriteback(page)) 1582 return 0; 1583 1584 if (mapping && mapping->a_ops->releasepage) 1585 return mapping->a_ops->releasepage(page, gfp_mask); 1586 return try_to_free_buffers(page); 1587 } 1588 EXPORT_SYMBOL(try_to_release_page); 1589 1590 /** 1591 * block_invalidatepage - invalidate part of all of a buffer-backed page 1592 * 1593 * @page: the page which is affected 1594 * @offset: the index of the truncation point 1595 * 1596 * block_invalidatepage() is called when all or part of the page has become 1597 * invalidatedby a truncate operation. 1598 * 1599 * block_invalidatepage() does not have to release all buffers, but it must 1600 * ensure that no dirty buffer is left outside @offset and that no I/O 1601 * is underway against any of the blocks which are outside the truncation 1602 * point. Because the caller is about to free (and possibly reuse) those 1603 * blocks on-disk. 1604 */ 1605 int block_invalidatepage(struct page *page, unsigned long offset) 1606 { 1607 struct buffer_head *head, *bh, *next; 1608 unsigned int curr_off = 0; 1609 int ret = 1; 1610 1611 BUG_ON(!PageLocked(page)); 1612 if (!page_has_buffers(page)) 1613 goto out; 1614 1615 head = page_buffers(page); 1616 bh = head; 1617 do { 1618 unsigned int next_off = curr_off + bh->b_size; 1619 next = bh->b_this_page; 1620 1621 /* 1622 * is this block fully invalidated? 1623 */ 1624 if (offset <= curr_off) 1625 discard_buffer(bh); 1626 curr_off = next_off; 1627 bh = next; 1628 } while (bh != head); 1629 1630 /* 1631 * We release buffers only if the entire page is being invalidated. 
1632 * The get_block cached value has been unconditionally invalidated, 1633 * so real IO is not possible anymore. 1634 */ 1635 if (offset == 0) 1636 ret = try_to_release_page(page, 0); 1637 out: 1638 return ret; 1639 } 1640 EXPORT_SYMBOL(block_invalidatepage); 1641 1642 int do_invalidatepage(struct page *page, unsigned long offset) 1643 { 1644 int (*invalidatepage)(struct page *, unsigned long); 1645 invalidatepage = page->mapping->a_ops->invalidatepage; 1646 if (invalidatepage == NULL) 1647 invalidatepage = block_invalidatepage; 1648 return (*invalidatepage)(page, offset); 1649 } 1650 1651 /* 1652 * We attach and possibly dirty the buffers atomically wrt 1653 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers 1654 * is already excluded via the page lock. 1655 */ 1656 void create_empty_buffers(struct page *page, 1657 unsigned long blocksize, unsigned long b_state) 1658 { 1659 struct buffer_head *bh, *head, *tail; 1660 1661 head = alloc_page_buffers(page, blocksize, 1); 1662 bh = head; 1663 do { 1664 bh->b_state |= b_state; 1665 tail = bh; 1666 bh = bh->b_this_page; 1667 } while (bh); 1668 tail->b_this_page = head; 1669 1670 spin_lock(&page->mapping->private_lock); 1671 if (PageUptodate(page) || PageDirty(page)) { 1672 bh = head; 1673 do { 1674 if (PageDirty(page)) 1675 set_buffer_dirty(bh); 1676 if (PageUptodate(page)) 1677 set_buffer_uptodate(bh); 1678 bh = bh->b_this_page; 1679 } while (bh != head); 1680 } 1681 attach_page_buffers(page, head); 1682 spin_unlock(&page->mapping->private_lock); 1683 } 1684 EXPORT_SYMBOL(create_empty_buffers); 1685 1686 /* 1687 * We are taking a block for data and we don't want any output from any 1688 * buffer-cache aliases starting from return from that function and 1689 * until the moment when something will explicitly mark the buffer 1690 * dirty (hopefully that will not happen until we will free that block ;-) 1691 * We don't even need to mark it not-uptodate - nobody can expect 1692 * anything from a newly allocated buffer anyway. We used to used 1693 * unmap_buffer() for such invalidation, but that was wrong. We definitely 1694 * don't want to mark the alias unmapped, for example - it would confuse 1695 * anyone who might pick it with bread() afterwards... 1696 * 1697 * Also.. Note that bforget() doesn't lock the buffer. So there can 1698 * be writeout I/O going on against recently-freed buffers. We don't 1699 * wait on that I/O in bforget() - it's more efficient to wait on the I/O 1700 * only if we really need to. That happens here. 1701 */ 1702 void unmap_underlying_metadata(struct block_device *bdev, sector_t block) 1703 { 1704 struct buffer_head *old_bh; 1705 1706 might_sleep(); 1707 1708 old_bh = __find_get_block_slow(bdev, block, 0); 1709 if (old_bh) { 1710 clear_buffer_dirty(old_bh); 1711 wait_on_buffer(old_bh); 1712 clear_buffer_req(old_bh); 1713 __brelse(old_bh); 1714 } 1715 } 1716 EXPORT_SYMBOL(unmap_underlying_metadata); 1717 1718 /* 1719 * NOTE! All mapped/uptodate combinations are valid: 1720 * 1721 * Mapped Uptodate Meaning 1722 * 1723 * No No "unknown" - must do get_block() 1724 * No Yes "hole" - zero-filled 1725 * Yes No "allocated" - allocated on disk, not read in 1726 * Yes Yes "valid" - allocated and up-to-date in memory. 1727 * 1728 * "Dirty" is valid only with the last case (mapped+uptodate). 1729 */ 1730 1731 /* 1732 * While block_write_full_page is writing back the dirty buffers under 1733 * the page lock, whoever dirtied the buffers may decide to clean them 1734 * again at any time. 
We handle that by only looking at the buffer 1735 * state inside lock_buffer(). 1736 * 1737 * If block_write_full_page() is called for regular writeback 1738 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 1739 * locked buffer. This only can happen if someone has written the buffer 1740 * directly, with submit_bh(). At the address_space level PageWriteback 1741 * prevents this contention from occurring. 1742 */ 1743 static int __block_write_full_page(struct inode *inode, struct page *page, 1744 get_block_t *get_block, struct writeback_control *wbc) 1745 { 1746 int err; 1747 sector_t block; 1748 sector_t last_block; 1749 struct buffer_head *bh, *head; 1750 int nr_underway = 0; 1751 1752 BUG_ON(!PageLocked(page)); 1753 1754 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; 1755 1756 if (!page_has_buffers(page)) { 1757 create_empty_buffers(page, 1 << inode->i_blkbits, 1758 (1 << BH_Dirty)|(1 << BH_Uptodate)); 1759 } 1760 1761 /* 1762 * Be very careful. We have no exclusion from __set_page_dirty_buffers 1763 * here, and the (potentially unmapped) buffers may become dirty at 1764 * any time. If a buffer becomes dirty here after we've inspected it 1765 * then we just miss that fact, and the page stays dirty. 1766 * 1767 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; 1768 * handle that here by just cleaning them. 1769 */ 1770 1771 block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1772 head = page_buffers(page); 1773 bh = head; 1774 1775 /* 1776 * Get all the dirty buffers mapped to disk addresses and 1777 * handle any aliases from the underlying blockdev's mapping. 1778 */ 1779 do { 1780 if (block > last_block) { 1781 /* 1782 * mapped buffers outside i_size will occur, because 1783 * this page can be outside i_size when there is a 1784 * truncate in progress. 1785 */ 1786 /* 1787 * The buffer was zeroed by block_write_full_page() 1788 */ 1789 clear_buffer_dirty(bh); 1790 set_buffer_uptodate(bh); 1791 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { 1792 err = get_block(inode, block, bh, 1); 1793 if (err) 1794 goto recover; 1795 if (buffer_new(bh)) { 1796 /* blockdev mappings never come here */ 1797 clear_buffer_new(bh); 1798 unmap_underlying_metadata(bh->b_bdev, 1799 bh->b_blocknr); 1800 } 1801 } 1802 bh = bh->b_this_page; 1803 block++; 1804 } while (bh != head); 1805 1806 do { 1807 if (!buffer_mapped(bh)) 1808 continue; 1809 /* 1810 * If it's a fully non-blocking write attempt and we cannot 1811 * lock the buffer then redirty the page. Note that this can 1812 * potentially cause a busy-wait loop from pdflush and kswapd 1813 * activity, but those code paths have their own higher-level 1814 * throttling. 1815 */ 1816 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 1817 lock_buffer(bh); 1818 } else if (test_set_buffer_locked(bh)) { 1819 redirty_page_for_writepage(wbc, page); 1820 continue; 1821 } 1822 if (test_clear_buffer_dirty(bh)) { 1823 mark_buffer_async_write(bh); 1824 } else { 1825 unlock_buffer(bh); 1826 } 1827 } while ((bh = bh->b_this_page) != head); 1828 1829 /* 1830 * The page and its buffers are protected by PageWriteback(), so we can 1831 * drop the bh refcounts early. 
1832 */ 1833 BUG_ON(PageWriteback(page)); 1834 set_page_writeback(page); 1835 1836 do { 1837 struct buffer_head *next = bh->b_this_page; 1838 if (buffer_async_write(bh)) { 1839 submit_bh(WRITE, bh); 1840 nr_underway++; 1841 } 1842 bh = next; 1843 } while (bh != head); 1844 unlock_page(page); 1845 1846 err = 0; 1847 done: 1848 if (nr_underway == 0) { 1849 /* 1850 * The page was marked dirty, but the buffers were 1851 * clean. Someone wrote them back by hand with 1852 * ll_rw_block/submit_bh. A rare case. 1853 */ 1854 int uptodate = 1; 1855 do { 1856 if (!buffer_uptodate(bh)) { 1857 uptodate = 0; 1858 break; 1859 } 1860 bh = bh->b_this_page; 1861 } while (bh != head); 1862 if (uptodate) 1863 SetPageUptodate(page); 1864 end_page_writeback(page); 1865 /* 1866 * The page and buffer_heads can be released at any time from 1867 * here on. 1868 */ 1869 wbc->pages_skipped++; /* We didn't write this page */ 1870 } 1871 return err; 1872 1873 recover: 1874 /* 1875 * ENOSPC, or some other error. We may already have added some 1876 * blocks to the file, so we need to write these out to avoid 1877 * exposing stale data. 1878 * The page is currently locked and not marked for writeback 1879 */ 1880 bh = head; 1881 /* Recovery: lock and submit the mapped buffers */ 1882 do { 1883 if (buffer_mapped(bh) && buffer_dirty(bh)) { 1884 lock_buffer(bh); 1885 mark_buffer_async_write(bh); 1886 } else { 1887 /* 1888 * The buffer may have been set dirty during 1889 * attachment to a dirty page. 1890 */ 1891 clear_buffer_dirty(bh); 1892 } 1893 } while ((bh = bh->b_this_page) != head); 1894 SetPageError(page); 1895 BUG_ON(PageWriteback(page)); 1896 set_page_writeback(page); 1897 unlock_page(page); 1898 do { 1899 struct buffer_head *next = bh->b_this_page; 1900 if (buffer_async_write(bh)) { 1901 clear_buffer_dirty(bh); 1902 submit_bh(WRITE, bh); 1903 nr_underway++; 1904 } 1905 bh = next; 1906 } while (bh != head); 1907 goto done; 1908 } 1909 1910 static int __block_prepare_write(struct inode *inode, struct page *page, 1911 unsigned from, unsigned to, get_block_t *get_block) 1912 { 1913 unsigned block_start, block_end; 1914 sector_t block; 1915 int err = 0; 1916 unsigned blocksize, bbits; 1917 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 1918 1919 BUG_ON(!PageLocked(page)); 1920 BUG_ON(from > PAGE_CACHE_SIZE); 1921 BUG_ON(to > PAGE_CACHE_SIZE); 1922 BUG_ON(from > to); 1923 1924 blocksize = 1 << inode->i_blkbits; 1925 if (!page_has_buffers(page)) 1926 create_empty_buffers(page, blocksize, 0); 1927 head = page_buffers(page); 1928 1929 bbits = inode->i_blkbits; 1930 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 1931 1932 for(bh = head, block_start = 0; bh != head || !block_start; 1933 block++, block_start=block_end, bh = bh->b_this_page) { 1934 block_end = block_start + blocksize; 1935 if (block_end <= from || block_start >= to) { 1936 if (PageUptodate(page)) { 1937 if (!buffer_uptodate(bh)) 1938 set_buffer_uptodate(bh); 1939 } 1940 continue; 1941 } 1942 if (buffer_new(bh)) 1943 clear_buffer_new(bh); 1944 if (!buffer_mapped(bh)) { 1945 err = get_block(inode, block, bh, 1); 1946 if (err) 1947 break; 1948 if (buffer_new(bh)) { 1949 unmap_underlying_metadata(bh->b_bdev, 1950 bh->b_blocknr); 1951 if (PageUptodate(page)) { 1952 set_buffer_uptodate(bh); 1953 continue; 1954 } 1955 if (block_end > to || block_start < from) { 1956 void *kaddr; 1957 1958 kaddr = kmap_atomic(page, KM_USER0); 1959 if (block_end > to) 1960 memset(kaddr+to, 0, 1961 block_end-to); 1962 if (block_start < from) 1963 
memset(kaddr+block_start, 1964 0, from-block_start); 1965 flush_dcache_page(page); 1966 kunmap_atomic(kaddr, KM_USER0); 1967 } 1968 continue; 1969 } 1970 } 1971 if (PageUptodate(page)) { 1972 if (!buffer_uptodate(bh)) 1973 set_buffer_uptodate(bh); 1974 continue; 1975 } 1976 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 1977 (block_start < from || block_end > to)) { 1978 ll_rw_block(READ, 1, &bh); 1979 *wait_bh++=bh; 1980 } 1981 } 1982 /* 1983 * If we issued read requests - let them complete. 1984 */ 1985 while(wait_bh > wait) { 1986 wait_on_buffer(*--wait_bh); 1987 if (!buffer_uptodate(*wait_bh)) 1988 err = -EIO; 1989 } 1990 if (!err) { 1991 bh = head; 1992 do { 1993 if (buffer_new(bh)) 1994 clear_buffer_new(bh); 1995 } while ((bh = bh->b_this_page) != head); 1996 return 0; 1997 } 1998 /* Error case: */ 1999 /* 2000 * Zero out any newly allocated blocks to avoid exposing stale 2001 * data. If BH_New is set, we know that the block was newly 2002 * allocated in the above loop. 2003 */ 2004 bh = head; 2005 block_start = 0; 2006 do { 2007 block_end = block_start+blocksize; 2008 if (block_end <= from) 2009 goto next_bh; 2010 if (block_start >= to) 2011 break; 2012 if (buffer_new(bh)) { 2013 void *kaddr; 2014 2015 clear_buffer_new(bh); 2016 kaddr = kmap_atomic(page, KM_USER0); 2017 memset(kaddr+block_start, 0, bh->b_size); 2018 kunmap_atomic(kaddr, KM_USER0); 2019 set_buffer_uptodate(bh); 2020 mark_buffer_dirty(bh); 2021 } 2022 next_bh: 2023 block_start = block_end; 2024 bh = bh->b_this_page; 2025 } while (bh != head); 2026 return err; 2027 } 2028 2029 static int __block_commit_write(struct inode *inode, struct page *page, 2030 unsigned from, unsigned to) 2031 { 2032 unsigned block_start, block_end; 2033 int partial = 0; 2034 unsigned blocksize; 2035 struct buffer_head *bh, *head; 2036 2037 blocksize = 1 << inode->i_blkbits; 2038 2039 for(bh = head = page_buffers(page), block_start = 0; 2040 bh != head || !block_start; 2041 block_start=block_end, bh = bh->b_this_page) { 2042 block_end = block_start + blocksize; 2043 if (block_end <= from || block_start >= to) { 2044 if (!buffer_uptodate(bh)) 2045 partial = 1; 2046 } else { 2047 set_buffer_uptodate(bh); 2048 mark_buffer_dirty(bh); 2049 } 2050 } 2051 2052 /* 2053 * If this is a partial write which happened to make all buffers 2054 * uptodate then we can optimize away a bogus readpage() for 2055 * the next read(). Here we 'discover' whether the page went 2056 * uptodate as a result of this (potentially partial) write. 2057 */ 2058 if (!partial) 2059 SetPageUptodate(page); 2060 return 0; 2061 } 2062 2063 /* 2064 * Generic "read page" function for block devices that have the normal 2065 * get_block functionality. This is most of the block device filesystems. 2066 * Reads the page asynchronously --- the unlock_buffer() and 2067 * set/clear_buffer_uptodate() functions propagate buffer state into the 2068 * page struct once IO has completed. 
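 *
 * A filesystem whose ->readpage() needs no special handling can simply
 * forward to this helper with its own get_block callback.  Illustrative
 * sketch only (the "myfs" names are hypothetical and not part of this file):
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}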
2069 */ 2070 int block_read_full_page(struct page *page, get_block_t *get_block) 2071 { 2072 struct inode *inode = page->mapping->host; 2073 sector_t iblock, lblock; 2074 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 2075 unsigned int blocksize; 2076 int nr, i; 2077 int fully_mapped = 1; 2078 2079 BUG_ON(!PageLocked(page)); 2080 blocksize = 1 << inode->i_blkbits; 2081 if (!page_has_buffers(page)) 2082 create_empty_buffers(page, blocksize, 0); 2083 head = page_buffers(page); 2084 2085 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2086 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits; 2087 bh = head; 2088 nr = 0; 2089 i = 0; 2090 2091 do { 2092 if (buffer_uptodate(bh)) 2093 continue; 2094 2095 if (!buffer_mapped(bh)) { 2096 int err = 0; 2097 2098 fully_mapped = 0; 2099 if (iblock < lblock) { 2100 err = get_block(inode, iblock, bh, 0); 2101 if (err) 2102 SetPageError(page); 2103 } 2104 if (!buffer_mapped(bh)) { 2105 void *kaddr = kmap_atomic(page, KM_USER0); 2106 memset(kaddr + i * blocksize, 0, blocksize); 2107 flush_dcache_page(page); 2108 kunmap_atomic(kaddr, KM_USER0); 2109 if (!err) 2110 set_buffer_uptodate(bh); 2111 continue; 2112 } 2113 /* 2114 * get_block() might have updated the buffer 2115 * synchronously 2116 */ 2117 if (buffer_uptodate(bh)) 2118 continue; 2119 } 2120 arr[nr++] = bh; 2121 } while (i++, iblock++, (bh = bh->b_this_page) != head); 2122 2123 if (fully_mapped) 2124 SetPageMappedToDisk(page); 2125 2126 if (!nr) { 2127 /* 2128 * All buffers are uptodate - we can set the page uptodate 2129 * as well. But not if get_block() returned an error. 2130 */ 2131 if (!PageError(page)) 2132 SetPageUptodate(page); 2133 unlock_page(page); 2134 return 0; 2135 } 2136 2137 /* Stage two: lock the buffers */ 2138 for (i = 0; i < nr; i++) { 2139 bh = arr[i]; 2140 lock_buffer(bh); 2141 mark_buffer_async_read(bh); 2142 } 2143 2144 /* 2145 * Stage 3: start the IO. Check for uptodateness 2146 * inside the buffer lock in case another process reading 2147 * the underlying blockdev brought it uptodate (the sct fix). 2148 */ 2149 for (i = 0; i < nr; i++) { 2150 bh = arr[i]; 2151 if (buffer_uptodate(bh)) 2152 end_buffer_async_read(bh, 1); 2153 else 2154 submit_bh(READ, bh); 2155 } 2156 return 0; 2157 } 2158 2159 /* utility function for filesystems that need to do work on expanding 2160 * truncates. Uses prepare/commit_write to allow the filesystem to 2161 * deal with the hole. 2162 */ 2163 int generic_cont_expand(struct inode *inode, loff_t size) 2164 { 2165 struct address_space *mapping = inode->i_mapping; 2166 struct page *page; 2167 unsigned long index, offset, limit; 2168 int err; 2169 2170 err = -EFBIG; 2171 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 2172 if (limit != RLIM_INFINITY && size > (loff_t)limit) { 2173 send_sig(SIGXFSZ, current, 0); 2174 goto out; 2175 } 2176 if (size > inode->i_sb->s_maxbytes) 2177 goto out; 2178 2179 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ 2180 2181 /* ugh. in prepare/commit_write, if from==to==start of block, we 2182 ** skip the prepare. 
make sure we never send an offset for the start 2183 ** of a block 2184 */ 2185 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) { 2186 offset++; 2187 } 2188 index = size >> PAGE_CACHE_SHIFT; 2189 err = -ENOMEM; 2190 page = grab_cache_page(mapping, index); 2191 if (!page) 2192 goto out; 2193 err = mapping->a_ops->prepare_write(NULL, page, offset, offset); 2194 if (!err) { 2195 err = mapping->a_ops->commit_write(NULL, page, offset, offset); 2196 } 2197 unlock_page(page); 2198 page_cache_release(page); 2199 if (err > 0) 2200 err = 0; 2201 out: 2202 return err; 2203 } 2204 2205 /* 2206 * For moronic filesystems that do not allow holes in file. 2207 * We may have to extend the file. 2208 */ 2209 2210 int cont_prepare_write(struct page *page, unsigned offset, 2211 unsigned to, get_block_t *get_block, loff_t *bytes) 2212 { 2213 struct address_space *mapping = page->mapping; 2214 struct inode *inode = mapping->host; 2215 struct page *new_page; 2216 pgoff_t pgpos; 2217 long status; 2218 unsigned zerofrom; 2219 unsigned blocksize = 1 << inode->i_blkbits; 2220 void *kaddr; 2221 2222 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) { 2223 status = -ENOMEM; 2224 new_page = grab_cache_page(mapping, pgpos); 2225 if (!new_page) 2226 goto out; 2227 /* we might sleep */ 2228 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) { 2229 unlock_page(new_page); 2230 page_cache_release(new_page); 2231 continue; 2232 } 2233 zerofrom = *bytes & ~PAGE_CACHE_MASK; 2234 if (zerofrom & (blocksize-1)) { 2235 *bytes |= (blocksize-1); 2236 (*bytes)++; 2237 } 2238 status = __block_prepare_write(inode, new_page, zerofrom, 2239 PAGE_CACHE_SIZE, get_block); 2240 if (status) 2241 goto out_unmap; 2242 kaddr = kmap_atomic(new_page, KM_USER0); 2243 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom); 2244 flush_dcache_page(new_page); 2245 kunmap_atomic(kaddr, KM_USER0); 2246 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE); 2247 unlock_page(new_page); 2248 page_cache_release(new_page); 2249 } 2250 2251 if (page->index < pgpos) { 2252 /* completely inside the area */ 2253 zerofrom = offset; 2254 } else { 2255 /* page covers the boundary, find the boundary offset */ 2256 zerofrom = *bytes & ~PAGE_CACHE_MASK; 2257 2258 /* if we will expand the thing last block will be filled */ 2259 if (to > zerofrom && (zerofrom & (blocksize-1))) { 2260 *bytes |= (blocksize-1); 2261 (*bytes)++; 2262 } 2263 2264 /* starting below the boundary? 
Nothing to zero out */
		if (offset <= zerofrom)
			zerofrom = offset;
	}
	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
	if (status)
		goto out1;
	if (zerofrom < offset) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr+zerofrom, 0, offset-zerofrom);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		__block_commit_write(inode, page, zerofrom, offset);
	}
	return 0;
out1:
	ClearPageUptodate(page);
	return status;

out_unmap:
	ClearPageUptodate(new_page);
	unlock_page(new_page);
	page_cache_release(new_page);
out:
	return status;
}

int block_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	int err = __block_prepare_write(inode, page, from, to, get_block);
	if (err)
		ClearPageUptodate(page);
	return err;
}

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);
	return 0;
}

int generic_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode,page,from,to);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_sem.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}


/*
 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 *
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 */
int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head map_bh;
	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
	unsigned block_in_page;
	unsigned block_start;
	sector_t block_in_file;
	char *kaddr;
	int nr_reads = 0;
	int i;
	int ret = 0;
	int is_mapped_to_disk = 1;
	int dirtied_it = 0;

	if (PageMappedToDisk(page))
		return 0;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	map_bh.b_page = page;

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
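	 *
	 * For each block, get_block() is asked for a mapping (with create set
	 * only while the block starts before 'to').  New or unmapped blocks
	 * have the parts outside [from, to) zeroed (unless the page is already
	 * uptodate), and mapped, non-uptodate blocks that straddle the edges
	 * of the range are collected so they can be read in below.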
2379 */ 2380 for (block_start = 0, block_in_page = 0; 2381 block_start < PAGE_CACHE_SIZE; 2382 block_in_page++, block_start += blocksize) { 2383 unsigned block_end = block_start + blocksize; 2384 int create; 2385 2386 map_bh.b_state = 0; 2387 create = 1; 2388 if (block_start >= to) 2389 create = 0; 2390 ret = get_block(inode, block_in_file + block_in_page, 2391 &map_bh, create); 2392 if (ret) 2393 goto failed; 2394 if (!buffer_mapped(&map_bh)) 2395 is_mapped_to_disk = 0; 2396 if (buffer_new(&map_bh)) 2397 unmap_underlying_metadata(map_bh.b_bdev, 2398 map_bh.b_blocknr); 2399 if (PageUptodate(page)) 2400 continue; 2401 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) { 2402 kaddr = kmap_atomic(page, KM_USER0); 2403 if (block_start < from) { 2404 memset(kaddr+block_start, 0, from-block_start); 2405 dirtied_it = 1; 2406 } 2407 if (block_end > to) { 2408 memset(kaddr + to, 0, block_end - to); 2409 dirtied_it = 1; 2410 } 2411 flush_dcache_page(page); 2412 kunmap_atomic(kaddr, KM_USER0); 2413 continue; 2414 } 2415 if (buffer_uptodate(&map_bh)) 2416 continue; /* reiserfs does this */ 2417 if (block_start < from || block_end > to) { 2418 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS); 2419 2420 if (!bh) { 2421 ret = -ENOMEM; 2422 goto failed; 2423 } 2424 bh->b_state = map_bh.b_state; 2425 atomic_set(&bh->b_count, 0); 2426 bh->b_this_page = NULL; 2427 bh->b_page = page; 2428 bh->b_blocknr = map_bh.b_blocknr; 2429 bh->b_size = blocksize; 2430 bh->b_data = (char *)(long)block_start; 2431 bh->b_bdev = map_bh.b_bdev; 2432 bh->b_private = NULL; 2433 read_bh[nr_reads++] = bh; 2434 } 2435 } 2436 2437 if (nr_reads) { 2438 struct buffer_head *bh; 2439 2440 /* 2441 * The page is locked, so these buffers are protected from 2442 * any VM or truncate activity. Hence we don't need to care 2443 * for the buffer_head refcounts. 2444 */ 2445 for (i = 0; i < nr_reads; i++) { 2446 bh = read_bh[i]; 2447 lock_buffer(bh); 2448 bh->b_end_io = end_buffer_read_nobh; 2449 submit_bh(READ, bh); 2450 } 2451 for (i = 0; i < nr_reads; i++) { 2452 bh = read_bh[i]; 2453 wait_on_buffer(bh); 2454 if (!buffer_uptodate(bh)) 2455 ret = -EIO; 2456 free_buffer_head(bh); 2457 read_bh[i] = NULL; 2458 } 2459 if (ret) 2460 goto failed; 2461 } 2462 2463 if (is_mapped_to_disk) 2464 SetPageMappedToDisk(page); 2465 SetPageUptodate(page); 2466 2467 /* 2468 * Setting the page dirty here isn't necessary for the prepare_write 2469 * function - commit_write will do that. But if/when this function is 2470 * used within the pagefault handler to ensure that all mmapped pages 2471 * have backing space in the filesystem, we will need to dirty the page 2472 * if its contents were altered. 2473 */ 2474 if (dirtied_it) 2475 set_page_dirty(page); 2476 2477 return 0; 2478 2479 failed: 2480 for (i = 0; i < nr_reads; i++) { 2481 if (read_bh[i]) 2482 free_buffer_head(read_bh[i]); 2483 } 2484 2485 /* 2486 * Error recovery is pretty slack. Clear the page and mark it dirty 2487 * so we'll later zero out any blocks which _were_ allocated. 
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
	SetPageUptodate(page);
	set_page_dirty(page);
	return ret;
}
EXPORT_SYMBOL(nobh_prepare_write);

int nobh_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_dirty(page);
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(nobh_commit_write);

/*
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	void *kaddr;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);

/*
 * This function assumes that ->prepare_write() uses nobh_prepare_write().
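 *
 * In other words it is meant for filesystems that wire the nobh helpers into
 * their address_space_operations and call nobh_truncate_page() from their
 * truncate path.  A minimal sketch, with hypothetical "myfs" names that are
 * not part of this file (the writepage wrapper would forward to
 * nobh_writepage() in the same way):
 *
 *	static int myfs_nobh_prepare_write(struct file *file, struct page *page,
 *			unsigned from, unsigned to)
 *	{
 *		return nobh_prepare_write(page, from, to, myfs_get_block);
 *	}
 *
 *	static struct address_space_operations myfs_nobh_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_nobh_writepage,
 *		.sync_page	= block_sync_page,
 *		.prepare_write	= myfs_nobh_prepare_write,
 *		.commit_write	= nobh_commit_write,
 *	};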
2570 */ 2571 int nobh_truncate_page(struct address_space *mapping, loff_t from) 2572 { 2573 struct inode *inode = mapping->host; 2574 unsigned blocksize = 1 << inode->i_blkbits; 2575 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2576 unsigned offset = from & (PAGE_CACHE_SIZE-1); 2577 unsigned to; 2578 struct page *page; 2579 struct address_space_operations *a_ops = mapping->a_ops; 2580 char *kaddr; 2581 int ret = 0; 2582 2583 if ((offset & (blocksize - 1)) == 0) 2584 goto out; 2585 2586 ret = -ENOMEM; 2587 page = grab_cache_page(mapping, index); 2588 if (!page) 2589 goto out; 2590 2591 to = (offset + blocksize) & ~(blocksize - 1); 2592 ret = a_ops->prepare_write(NULL, page, offset, to); 2593 if (ret == 0) { 2594 kaddr = kmap_atomic(page, KM_USER0); 2595 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); 2596 flush_dcache_page(page); 2597 kunmap_atomic(kaddr, KM_USER0); 2598 set_page_dirty(page); 2599 } 2600 unlock_page(page); 2601 page_cache_release(page); 2602 out: 2603 return ret; 2604 } 2605 EXPORT_SYMBOL(nobh_truncate_page); 2606 2607 int block_truncate_page(struct address_space *mapping, 2608 loff_t from, get_block_t *get_block) 2609 { 2610 pgoff_t index = from >> PAGE_CACHE_SHIFT; 2611 unsigned offset = from & (PAGE_CACHE_SIZE-1); 2612 unsigned blocksize; 2613 pgoff_t iblock; 2614 unsigned length, pos; 2615 struct inode *inode = mapping->host; 2616 struct page *page; 2617 struct buffer_head *bh; 2618 void *kaddr; 2619 int err; 2620 2621 blocksize = 1 << inode->i_blkbits; 2622 length = offset & (blocksize - 1); 2623 2624 /* Block boundary? Nothing to do */ 2625 if (!length) 2626 return 0; 2627 2628 length = blocksize - length; 2629 iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2630 2631 page = grab_cache_page(mapping, index); 2632 err = -ENOMEM; 2633 if (!page) 2634 goto out; 2635 2636 if (!page_has_buffers(page)) 2637 create_empty_buffers(page, blocksize, 0); 2638 2639 /* Find the buffer that contains "offset" */ 2640 bh = page_buffers(page); 2641 pos = blocksize; 2642 while (offset >= pos) { 2643 bh = bh->b_this_page; 2644 iblock++; 2645 pos += blocksize; 2646 } 2647 2648 err = 0; 2649 if (!buffer_mapped(bh)) { 2650 err = get_block(inode, iblock, bh, 0); 2651 if (err) 2652 goto unlock; 2653 /* unmapped? It's a hole - nothing to do */ 2654 if (!buffer_mapped(bh)) 2655 goto unlock; 2656 } 2657 2658 /* Ok, it's mapped. Make sure it's up-to-date */ 2659 if (PageUptodate(page)) 2660 set_buffer_uptodate(bh); 2661 2662 if (!buffer_uptodate(bh) && !buffer_delay(bh)) { 2663 err = -EIO; 2664 ll_rw_block(READ, 1, &bh); 2665 wait_on_buffer(bh); 2666 /* Uhhuh. Read error. Complain and punt. */ 2667 if (!buffer_uptodate(bh)) 2668 goto unlock; 2669 } 2670 2671 kaddr = kmap_atomic(page, KM_USER0); 2672 memset(kaddr + offset, 0, length); 2673 flush_dcache_page(page); 2674 kunmap_atomic(kaddr, KM_USER0); 2675 2676 mark_buffer_dirty(bh); 2677 err = 0; 2678 2679 unlock: 2680 unlock_page(page); 2681 page_cache_release(page); 2682 out: 2683 return err; 2684 } 2685 2686 /* 2687 * The generic ->writepage function for buffer-backed address_spaces 2688 */ 2689 int block_write_full_page(struct page *page, get_block_t *get_block, 2690 struct writeback_control *wbc) 2691 { 2692 struct inode * const inode = page->mapping->host; 2693 loff_t i_size = i_size_read(inode); 2694 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 2695 unsigned offset; 2696 void *kaddr; 2697 2698 /* Is the page fully inside i_size? 
 */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	return __block_write_full_page(inode, page, get_block, wbc);
}

sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}

static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (bio->bi_size)
		return 1;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		set_bit(BH_Eopnotsupp, &bh->b_state);
	}

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
	return 0;
}

int submit_bh(int rw, struct buffer_head * bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	if (buffer_ordered(bh) && (rw == WRITE))
		rw = WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting, should this
	 * include WRITE_SYNC as well?
	 */
	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
 * are sent to disk.  The fourth %READA option is described in the documentation
 * for generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
 * clean when doing a write request, and any buffer that appears to be
 * up-to-date when doing a read request.  Further it marks as clean buffers that
 * are processed for writing (the buffer cache won't assume that they are
 * actually clean until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (rw == SWRITE)
			lock_buffer(bh);
		else if (test_set_buffer_locked(bh))
			continue;

		get_bh(bh);
		if (rw == WRITE || rw == SWRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
		put_bh(bh);
	}
}

/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref on
 * the buffer_head.
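 *
 * A typical caller has just modified an on-disk structure held in a
 * buffer_head (on which it already holds a reference) and wants it on stable
 * storage before continuing.  Illustrative sketch only; "do_sync" is a
 * hypothetical flag, not part of this file:
 *
 *	mark_buffer_dirty(bh);
 *	if (do_sync)
 *		err = sync_dirty_buffer(bh);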
2866 */ 2867 int sync_dirty_buffer(struct buffer_head *bh) 2868 { 2869 int ret = 0; 2870 2871 WARN_ON(atomic_read(&bh->b_count) < 1); 2872 lock_buffer(bh); 2873 if (test_clear_buffer_dirty(bh)) { 2874 get_bh(bh); 2875 bh->b_end_io = end_buffer_write_sync; 2876 ret = submit_bh(WRITE, bh); 2877 wait_on_buffer(bh); 2878 if (buffer_eopnotsupp(bh)) { 2879 clear_buffer_eopnotsupp(bh); 2880 ret = -EOPNOTSUPP; 2881 } 2882 if (!ret && !buffer_uptodate(bh)) 2883 ret = -EIO; 2884 } else { 2885 unlock_buffer(bh); 2886 } 2887 return ret; 2888 } 2889 2890 /* 2891 * try_to_free_buffers() checks if all the buffers on this particular page 2892 * are unused, and releases them if so. 2893 * 2894 * Exclusion against try_to_free_buffers may be obtained by either 2895 * locking the page or by holding its mapping's private_lock. 2896 * 2897 * If the page is dirty but all the buffers are clean then we need to 2898 * be sure to mark the page clean as well. This is because the page 2899 * may be against a block device, and a later reattachment of buffers 2900 * to a dirty page will set *all* buffers dirty. Which would corrupt 2901 * filesystem data on the same device. 2902 * 2903 * The same applies to regular filesystem pages: if all the buffers are 2904 * clean then we set the page clean and proceed. To do that, we require 2905 * total exclusion from __set_page_dirty_buffers(). That is obtained with 2906 * private_lock. 2907 * 2908 * try_to_free_buffers() is non-blocking. 2909 */ 2910 static inline int buffer_busy(struct buffer_head *bh) 2911 { 2912 return atomic_read(&bh->b_count) | 2913 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 2914 } 2915 2916 static int 2917 drop_buffers(struct page *page, struct buffer_head **buffers_to_free) 2918 { 2919 struct buffer_head *head = page_buffers(page); 2920 struct buffer_head *bh; 2921 2922 bh = head; 2923 do { 2924 if (buffer_write_io_error(bh) && page->mapping) 2925 set_bit(AS_EIO, &page->mapping->flags); 2926 if (buffer_busy(bh)) 2927 goto failed; 2928 bh = bh->b_this_page; 2929 } while (bh != head); 2930 2931 do { 2932 struct buffer_head *next = bh->b_this_page; 2933 2934 if (!list_empty(&bh->b_assoc_buffers)) 2935 __remove_assoc_queue(bh); 2936 bh = next; 2937 } while (bh != head); 2938 *buffers_to_free = head; 2939 __clear_page_buffers(page); 2940 return 1; 2941 failed: 2942 return 0; 2943 } 2944 2945 int try_to_free_buffers(struct page *page) 2946 { 2947 struct address_space * const mapping = page->mapping; 2948 struct buffer_head *buffers_to_free = NULL; 2949 int ret = 0; 2950 2951 BUG_ON(!PageLocked(page)); 2952 if (PageWriteback(page)) 2953 return 0; 2954 2955 if (mapping == NULL) { /* can this still happen? */ 2956 ret = drop_buffers(page, &buffers_to_free); 2957 goto out; 2958 } 2959 2960 spin_lock(&mapping->private_lock); 2961 ret = drop_buffers(page, &buffers_to_free); 2962 if (ret) { 2963 /* 2964 * If the filesystem writes its buffers by hand (eg ext3) 2965 * then we can have clean buffers against a dirty page. We 2966 * clean the page here; otherwise later reattachment of buffers 2967 * could encounter a non-uptodate page, which is unresolvable. 2968 * This only applies in the rare case where try_to_free_buffers 2969 * succeeds but the page is not freed. 
2970 */ 2971 clear_page_dirty(page); 2972 } 2973 spin_unlock(&mapping->private_lock); 2974 out: 2975 if (buffers_to_free) { 2976 struct buffer_head *bh = buffers_to_free; 2977 2978 do { 2979 struct buffer_head *next = bh->b_this_page; 2980 free_buffer_head(bh); 2981 bh = next; 2982 } while (bh != buffers_to_free); 2983 } 2984 return ret; 2985 } 2986 EXPORT_SYMBOL(try_to_free_buffers); 2987 2988 int block_sync_page(struct page *page) 2989 { 2990 struct address_space *mapping; 2991 2992 smp_mb(); 2993 mapping = page_mapping(page); 2994 if (mapping) 2995 blk_run_backing_dev(mapping->backing_dev_info, page); 2996 return 0; 2997 } 2998 2999 /* 3000 * There are no bdflush tunables left. But distributions are 3001 * still running obsolete flush daemons, so we terminate them here. 3002 * 3003 * Use of bdflush() is deprecated and will be removed in a future kernel. 3004 * The `pdflush' kernel threads fully replace bdflush daemons and this call. 3005 */ 3006 asmlinkage long sys_bdflush(int func, long data) 3007 { 3008 static int msg_count; 3009 3010 if (!capable(CAP_SYS_ADMIN)) 3011 return -EPERM; 3012 3013 if (msg_count < 5) { 3014 msg_count++; 3015 printk(KERN_INFO 3016 "warning: process `%s' used the obsolete bdflush" 3017 " system call\n", current->comm); 3018 printk(KERN_INFO "Fix your initscripts?\n"); 3019 } 3020 3021 if (func == 1) 3022 do_exit(0); 3023 return 0; 3024 } 3025 3026 /* 3027 * Buffer-head allocation 3028 */ 3029 static kmem_cache_t *bh_cachep; 3030 3031 /* 3032 * Once the number of bh's in the machine exceeds this level, we start 3033 * stripping them in writeback. 3034 */ 3035 static int max_buffer_heads; 3036 3037 int buffer_heads_over_limit; 3038 3039 struct bh_accounting { 3040 int nr; /* Number of live bh's */ 3041 int ratelimit; /* Limit cacheline bouncing */ 3042 }; 3043 3044 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3045 3046 static void recalc_bh_state(void) 3047 { 3048 int i; 3049 int tot = 0; 3050 3051 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) 3052 return; 3053 __get_cpu_var(bh_accounting).ratelimit = 0; 3054 for_each_cpu(i) 3055 tot += per_cpu(bh_accounting, i).nr; 3056 buffer_heads_over_limit = (tot > max_buffer_heads); 3057 } 3058 3059 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3060 { 3061 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 3062 if (ret) { 3063 get_cpu_var(bh_accounting).nr++; 3064 recalc_bh_state(); 3065 put_cpu_var(bh_accounting); 3066 } 3067 return ret; 3068 } 3069 EXPORT_SYMBOL(alloc_buffer_head); 3070 3071 void free_buffer_head(struct buffer_head *bh) 3072 { 3073 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3074 kmem_cache_free(bh_cachep, bh); 3075 get_cpu_var(bh_accounting).nr--; 3076 recalc_bh_state(); 3077 put_cpu_var(bh_accounting); 3078 } 3079 EXPORT_SYMBOL(free_buffer_head); 3080 3081 static void 3082 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags) 3083 { 3084 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 3085 SLAB_CTOR_CONSTRUCTOR) { 3086 struct buffer_head * bh = (struct buffer_head *)data; 3087 3088 memset(bh, 0, sizeof(*bh)); 3089 INIT_LIST_HEAD(&bh->b_assoc_buffers); 3090 } 3091 } 3092 3093 #ifdef CONFIG_HOTPLUG_CPU 3094 static void buffer_exit_cpu(int cpu) 3095 { 3096 int i; 3097 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3098 3099 for (i = 0; i < BH_LRU_SIZE; i++) { 3100 brelse(b->bhs[i]); 3101 b->bhs[i] = NULL; 3102 } 3103 } 3104 3105 static int buffer_cpu_notify(struct notifier_block *self, 3106 unsigned long action, void *hcpu) 
3107 { 3108 if (action == CPU_DEAD) 3109 buffer_exit_cpu((unsigned long)hcpu); 3110 return NOTIFY_OK; 3111 } 3112 #endif /* CONFIG_HOTPLUG_CPU */ 3113 3114 void __init buffer_init(void) 3115 { 3116 int nrpages; 3117 3118 bh_cachep = kmem_cache_create("buffer_head", 3119 sizeof(struct buffer_head), 0, 3120 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL); 3121 3122 /* 3123 * Limit the bh occupancy to 10% of ZONE_NORMAL 3124 */ 3125 nrpages = (nr_free_buffer_pages() * 10) / 100; 3126 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 3127 hotcpu_notifier(buffer_cpu_notify, 0); 3128 } 3129 3130 EXPORT_SYMBOL(__bforget); 3131 EXPORT_SYMBOL(__brelse); 3132 EXPORT_SYMBOL(__wait_on_buffer); 3133 EXPORT_SYMBOL(block_commit_write); 3134 EXPORT_SYMBOL(block_prepare_write); 3135 EXPORT_SYMBOL(block_read_full_page); 3136 EXPORT_SYMBOL(block_sync_page); 3137 EXPORT_SYMBOL(block_truncate_page); 3138 EXPORT_SYMBOL(block_write_full_page); 3139 EXPORT_SYMBOL(cont_prepare_write); 3140 EXPORT_SYMBOL(end_buffer_async_write); 3141 EXPORT_SYMBOL(end_buffer_read_sync); 3142 EXPORT_SYMBOL(end_buffer_write_sync); 3143 EXPORT_SYMBOL(file_fsync); 3144 EXPORT_SYMBOL(fsync_bdev); 3145 EXPORT_SYMBOL(generic_block_bmap); 3146 EXPORT_SYMBOL(generic_commit_write); 3147 EXPORT_SYMBOL(generic_cont_expand); 3148 EXPORT_SYMBOL(init_buffer); 3149 EXPORT_SYMBOL(invalidate_bdev); 3150 EXPORT_SYMBOL(ll_rw_block); 3151 EXPORT_SYMBOL(mark_buffer_dirty); 3152 EXPORT_SYMBOL(submit_bh); 3153 EXPORT_SYMBOL(sync_dirty_buffer); 3154 EXPORT_SYMBOL(unlock_buffer); 3155