/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations. Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <trace/events/block.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
                         enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

inline void touch_buffer(struct buffer_head *bh)
{
        trace_block_touch_buffer(bh);
        mark_page_accessed(bh->b_page);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_atomic();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns if the page has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the PageDirty information is stale. If
 * any of the pages are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct page *page,
                                  bool *dirty, bool *writeback)
{
        struct buffer_head *head, *bh;
        *dirty = false;
        *writeback = false;

        BUG_ON(!PageLocked(page));

        if (!page_has_buffers(page))
                return;

        if (PageWriteback(page))
                *writeback = true;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_locked(bh))
                        *writeback = true;

                if (buffer_dirty(bh))
                        *dirty = true;

                bh = bh->b_this_page;
        } while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);

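/*
 * Illustrative sketch, not part of the original file: the canonical way to
 * serialize access to a buffer's contents using the lock above. The helper
 * name is hypothetical; lock_buffer()/unlock_buffer() are the real API.
 */
static void __maybe_unused example_update_buffer(struct buffer_head *bh)
{
        lock_buffer(bh);        /* sleeps in __lock_buffer() if contended */
        /* ... modify bh->b_data while no I/O can run against it ... */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);      /* wakes waiters in __wait_on_buffer() */
}
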
/*
 * Block until a buffer comes unlocked. This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        put_page(page);
}

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
        if (!test_bit(BH_Quiet, &bh->b_state))
                printk_ratelimited(KERN_ERR
                        "Buffer I/O error on dev %pg, logical block %llu%s\n",
                        bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed read-ahead attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost sync page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

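/*
 * Illustrative sketch, not part of the original file: how a caller would use
 * end_buffer_read_sync() to read a buffer by hand. This is essentially what
 * __bread_slow() further below does; the function name is hypothetical.
 */
static int __maybe_unused example_read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);             /* balanced by put_bh() in the completion */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(REQ_OP_READ, 0, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
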
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers. To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high. This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped. This is due to various races between
         * file io on the block device and getblk. It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device %pg blocksize: %d\n", bdev,
                        1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        put_page(page);
out:
        return ret;
}

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zoneref *z;
        int nid;

        wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
        yield();

        for_each_online_node(nid) {

                z = first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                         gfp_zone(GFP_NOFS), NULL);
                if (z->zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                          GFP_NOFS, NULL);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                buffer_io_error(bh, ", async page read");
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost async page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed. This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions. A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync(). For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed. But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers. Which is different from the address_space
 * against which the buffers are listed. So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list! In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want. The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
 * filesystems should do that. invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
 * take an address_space, not an inode. And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list. Because if the buffer is on a list,
 * it *must* already be on the right one. If not, the filesystem is being
 * silly. This will save a ton of locking. But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate). That requires careful auditing of all
 * filesystems (do it inside bforget()). It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io. It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion. Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

static void do_thaw_one(struct super_block *sb, void *unused)
{
        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
                printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

static void do_thaw_all(struct work_struct *work)
{
        iterate_supers(do_thaw_one, NULL);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->private_data;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                  &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer. This means that the block at
 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
 * dirty, schedule it for IO. So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->private_data) {
                mapping->private_data = buffer_mapping;
        } else {
                BUG_ON(mapping->private_data != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

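/*
 * Illustrative sketch, not part of the original file: how a filesystem uses
 * the association machinery above. An ext2-style filesystem dirties an
 * indirect block against the owning inode, and its fsync path then writes
 * and waits on it via sync_mapping_buffers(). Function names here are
 * hypothetical.
 */
static void __maybe_unused example_dirty_indirect(struct inode *inode,
                                                  struct buffer_head *bh)
{
        /* queue bh on inode->i_mapping->private_list for a later fsync */
        mark_buffer_dirty_inode(bh, inode);
}

static int __maybe_unused example_fsync_metadata(struct inode *inode)
{
        /* write out and wait upon everything queued above */
        return sync_mapping_buffers(inode->i_mapping);
}
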
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 *
 * The caller must hold lock_page_memcg().
 */
static void __set_page_dirty(struct page *page, struct address_space *mapping,
                             int warn)
{
        unsigned long flags;

        spin_lock_irqsave(&mapping->tree_lock, flags);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking. It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers. If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied. There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness. That's fine. If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list. Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well. That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        /*
         * Lock out page->mem_cgroup migration to keep PageDirty
         * synchronized with per-memcg dirty page counters.
         */
        lock_page_memcg(page);
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);

        unlock_page_memcg(page);

        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

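/*
 * Usage note (not in the original file): __set_page_dirty_buffers() is what
 * set_page_dirty() falls back to for buffer-backed mappings, and filesystems
 * can also wire it up explicitly in their address_space_operations, e.g.:
 *
 *      static const struct address_space_operations example_aops = {
 *              .set_page_dirty = __set_page_dirty_buffers,
 *      };
 *
 * "example_aops" is a hypothetical name used purely for illustration.
 */
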
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't. After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go. Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list. So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;

        INIT_LIST_HEAD(&tmp);
        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, REQ_SYNC);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode. We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync(). Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
 * assumes that all the buffers are against the blockdev. Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->private_data;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list. This is called
 * when we're trying to free the inode itself. Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->private_data;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created. Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests. Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available. But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO. Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

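/*
 * Worked example (not in the original file): for a 1 GiB device and a 4 KiB
 * block size, blksize_bits(4096) == 12, so blkdev_max_block() returns
 * (1 << 30) >> 12 == 262144 - the first block number that is out of range,
 * since valid blocks are numbered 0..262143.
 */
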
/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);
        sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        if (block < end_block)
                                set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * Caller needs to validate requested block against end of device.
         */
        return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
              pgoff_t index, int size, int sizebits, gfp_t gfp)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;
        sector_t end_block;
        int ret = 0;            /* Will call free_more_memory() */
        gfp_t gfp_mask;

        gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

        /*
         * XXX: __getblk_slow() can not really deal with failure and
         * will endlessly loop on improvised global reclaim. Prefer
         * looping in the allocator rather than here, at least that
         * code knows what it's doing.
         */
        gfp_mask |= __GFP_NOFAIL;

        page = find_or_create_page(inode->i_mapping, index, gfp_mask);
        if (!page)
                return ret;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
                                                (sector_t)index << sizebits,
                                                size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them. Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
                        size);
        spin_unlock(&inode->i_mapping->private_lock);
done:
        ret = (block < end_block) ? 1 : -ENXIO;
failed:
        unlock_page(page);
        put_page(page);
        return ret;
}

/*
 * Create buffers for the specified block device block's page. If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index. (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %pg\n",
                        __func__, (unsigned long long)block,
                        bdev);
                return -EIO;
        }

        /* Create a page with the proper size buffers.. */
        return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}
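
/*
 * Worked example (not in the original file): with 512-byte buffers on a
 * 4 KiB page, the sizebits loop above yields sizebits == 3 (512 << 3 ==
 * PAGE_SIZE), so block 1000 lands in page index 1000 >> 3 == 125, as buffer
 * number 1000 & 7 == 0 within that page.
 */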

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
             unsigned size, gfp_t gfp)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "logical block size: %d\n",
                                        bdev_logical_block_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head *bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size, gfp);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page. If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also. When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate. But their backing page remains not
 * uptodate - even if all of its buffers are uptodate. A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        trace_block_dirty_buffer(bh);

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;
                struct address_space *mapping = NULL;

                lock_page_memcg(page);
                if (!TestSetPageDirty(page)) {
                        mapping = page_mapping(page);
                        if (mapping)
                                __set_page_dirty(page, mapping, 0);
                }
                unlock_page_memcg(page);
                if (mapping)
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);

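/*
 * Illustrative sketch, not part of the original file: the common
 * modify-then-flush pattern built on mark_buffer_dirty(). The function name
 * is hypothetical; sync_dirty_buffer() is the real API declared in
 * <linux/buffer_head.h>.
 */
static int __maybe_unused example_modify_block(struct buffer_head *bh)
{
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);      /* modify the cached block */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);                  /* page + inode become dirty */
        return sync_dirty_buffer(bh);           /* write it out now and wait */
}
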
void mark_buffer_write_io_error(struct buffer_head *bh)
{
        set_buffer_write_io_error(bh);
        /* FIXME: do we need to set this in both places? */
        if (bh->b_page && bh->b_page->mapping)
                mapping_set_error(bh->b_page->mapping, -EIO);
        if (bh->b_assoc_map)
                mapping_set_error(bh->b_assoc_map, -EIO);
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/*
 * Decrement a buffer_head's reference count. If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(REQ_OP_READ, 0, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
 * refcount elevated by one when they're in an LRU. A buffer can only appear
 * once in a particular CPU's LRU. A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     16

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = bh;
        struct bh_lru *b;
        int i;

        check_irqs_on();
        bh_lru_lock();

        b = this_cpu_ptr(&bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                swap(evictee, b->bhs[i]);
                if (evictee == bh) {
                        bh_lru_unlock();
                        return;
                }
        }

        get_bh(bh);
        bh_lru_unlock();
        brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

                if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
                    bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        __this_cpu_write(bh_lrus.bhs[i],
                                                __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
                                __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
 * it in the LRU and mark it as accessed. If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                /* __find_get_block_slow will mark the page accessed */
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        } else
                touch_buffer(bh);

        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk_gfp() will lock up the machine if grow_dev_page's
 * try_to_free_buffers() attempt is failing. FIXME, perhaps?
 */
struct buffer_head *
__getblk_gfp(struct block_device *bdev, sector_t block,
             unsigned size, gfp_t gfp)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size, gfp);
        return bh;
}
EXPORT_SYMBOL(__getblk_gfp);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
                ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);

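/*
 * Illustrative sketch, not part of the original file: a typical metadata read
 * through the cache front-end above. "example_read_super_block" is a
 * hypothetical name; __bread() is the real wrapper that calls __bread_gfp()
 * (defined just below) with __GFP_MOVABLE.
 */
static struct buffer_head *__maybe_unused
example_read_super_block(struct block_device *bdev)
{
        struct buffer_head *bh = __bread(bdev, 1, 1024); /* block 1, 1 KiB */

        if (!bh)
                return NULL;    /* the block was unreadable */
        /* ... parse bh->b_data ... */
        return bh;              /* caller must brelse(bh) when done */
}
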
/**
 * __bread_gfp() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 * @gfp: page allocation flag
 *
 * Reads a specified block, and returns the buffer head that contains it.
 * If @gfp is zero, the page cache is allocated from the non-movable area,
 * so that the buffer page does not get in the way of page migration.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
                   unsigned size, gfp_t gfp)
{
        struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread_gfp);

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
        put_cpu_var(bh_lrus);
}

static bool has_bh_in_lru(int cpu, void *dummy)
{
        struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                if (b->bhs[i])
                        return 1;
        }

        return 0;
}

void invalidate_bh_lrus(void)
{
        on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
{
        bh->b_page = page;
        BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
                 */
                bh->b_data = (char *)(0 + offset);
        else
                bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
        (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
         1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
        unsigned long b_state, b_state_old;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        b_state = bh->b_state;
        for (;;) {
                b_state_old = cmpxchg(&bh->b_state, b_state,
                                      (b_state & ~BUFFER_FLAGS_DISCARD));
                if (b_state_old == b_state)
                        break;
                b_state = b_state_old;
        }
        unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
                          unsigned int length)
{
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        unsigned int stop = length + offset;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                goto out;

        /*
         * Check for overflow
         */
        BUG_ON(stop > PAGE_SIZE || stop < length);

        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                /*
                 * Are we still fully in range ?
                 */
                if (next_off > stop)
                        goto out;

                /*
                 * is this block fully invalidated?
                 */
                if (offset <= curr_off)
                        discard_buffer(bh);
                curr_off = next_off;
                bh = next;
        } while (bh != head);

        /*
         * We release buffers only if the entire page is being invalidated.
         * The get_block cached value has been unconditionally invalidated,
         * so real IO is not possible anymore.
         */
        if (offset == 0)
                try_to_release_page(page, 0);
out:
        return;
}
EXPORT_SYMBOL(block_invalidatepage);


/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
                        unsigned long blocksize, unsigned long b_state)
{
        struct buffer_head *bh, *head, *tail;

        head = alloc_page_buffers(page, blocksize, 1);
        bh = head;
        do {
                bh->b_state |= b_state;
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;

        spin_lock(&page->mapping->private_lock);
        if (PageUptodate(page) || PageDirty(page)) {
                bh = head;
                do {
                        if (PageDirty(page))
                                set_buffer_dirty(bh);
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        attach_page_buffers(page, head);
        spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

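/*
 * Usage note (not in the original file): readpage/writepage paths attach
 * buffers lazily with the helper above, roughly:
 *
 *      if (!page_has_buffers(page))
 *              create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 *      head = page_buffers(page);
 *
 * create_page_buffers() below wraps exactly this pattern.
 */
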
/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from return from this function and until the
 * moment when something will explicitly mark the buffer dirty (hopefully that
 * will not happen until we free that block ;-) We don't even need to mark it
 * not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway. We used to use unmap_buffer() for such invalidation, but that was
 * wrong. We definitely don't want to mark the alias unmapped, for example - it
 * would confuse anyone who might pick it with bread() afterwards...
 *
 * Also.. Note that bforget() doesn't lock the buffer. So there can be
 * writeout I/O going on against recently-freed buffers. We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to. That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct pagevec pvec;
        pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
        pgoff_t end;
        int i, count;
        struct buffer_head *bh;
        struct buffer_head *head;

        end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
        pagevec_init(&pvec, 0);
        while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
                count = pagevec_count(&pvec);
                for (i = 0; i < count; i++) {
                        struct page *page = pvec.pages[i];

                        if (!page_has_buffers(page))
                                continue;
                        /*
                         * We use page lock instead of bd_mapping->private_lock
                         * to pin buffers here since we can afford to sleep and
                         * it scales better than a global spinlock lock.
                         */
                        lock_page(page);
                        /* Recheck when the page is locked which pins bhs */
                        if (!page_has_buffers(page))
                                goto unlock_page;
                        head = page_buffers(page);
                        bh = head;
                        do {
                                if (!buffer_mapped(bh) || (bh->b_blocknr < block))
                                        goto next;
                                if (bh->b_blocknr >= block + len)
                                        break;
                                clear_buffer_dirty(bh);
                                wait_on_buffer(bh);
                                clear_buffer_req(bh);
next:
                                bh = bh->b_this_page;
                        } while (bh != head);
unlock_page:
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
                /* End of range already reached? */
                if (index > end || !index)
                        break;
        }
}
EXPORT_SYMBOL(clean_bdev_aliases);

/*
 * Size is a power-of-two in the range 512..PAGE_SIZE,
 * and the case we care about most is PAGE_SIZE.
 *
 * So this *could* possibly be written with those
 * constraints in mind (relevant mostly if some
 * architecture has a slow bit-scan instruction)
 */
static inline int block_size_bits(unsigned int blocksize)
{
        return ilog2(blocksize);
}

static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
{
        BUG_ON(!PageLocked(page));

        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
        return page_buffers(page);
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *      Mapped  Uptodate        Meaning
 *
 *      No      No              "unknown" - must do get_block()
 *      No      Yes             "hole" - zero-filled
 *      Yes     No              "allocated" - allocated on disk, not read in
 *      Yes     Yes             "valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

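/*
 * Illustrative sketch, not part of the original file: a trivial get_block_t
 * for a 1:1 mapped device, showing how "mapped" is established for the state
 * table above. The name is hypothetical; compare blkdev_get_block() in
 * fs/block_dev.c.
 */
static int __maybe_unused example_get_block(struct inode *inode,
                sector_t iblock, struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);     /* backing device */
        bh->b_blocknr = iblock;         /* identity mapping: no allocation */
        set_buffer_mapped(bh);          /* mapped; uptodate is the caller's job */
        return 0;
}
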
/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time. We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer. This only can happen if someone has written the buffer
 * directly, with submit_bh(). At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc,
                        bh_end_io_t *handler)
{
        int err;
        sector_t block;
        sector_t last_block;
        struct buffer_head *bh, *head;
        unsigned int blocksize, bbits;
        int nr_underway = 0;
        int write_flags = wbc_to_write_flags(wbc);

        head = create_page_buffers(page, inode,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));

        /*
         * Be very careful. We have no exclusion from __set_page_dirty_buffers
         * here, and the (potentially unmapped) buffers may become dirty at
         * any time. If a buffer becomes dirty here after we've inspected it
         * then we just miss that fact, and the page stays dirty.
         *
         * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
         * handle that here by just cleaning them.
         */

        bh = head;
        blocksize = bh->b_size;
        bbits = block_size_bits(blocksize);

        block = (sector_t)page->index << (PAGE_SHIFT - bbits);
        last_block = (i_size_read(inode) - 1) >> bbits;

        /*
         * Get all the dirty buffers mapped to disk addresses and
         * handle any aliases from the underlying blockdev's mapping.
         */
        do {
                if (block > last_block) {
                        /*
                         * mapped buffers outside i_size will occur, because
                         * this page can be outside i_size when there is a
                         * truncate in progress.
                         */
                        /*
                         * The buffer was zeroed by block_write_full_page()
                         */
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
                           buffer_dirty(bh)) {
                        WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
                        clear_buffer_delay(bh);
                        if (buffer_new(bh)) {
                                /* blockdev mappings never come here */
                                clear_buffer_new(bh);
                                clean_bdev_bh_alias(bh);
                        }
                }
                bh = bh->b_this_page;
                block++;
        } while (bh != head);

        do {
                if (!buffer_mapped(bh))
                        continue;
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page. Note that this can
                 * potentially cause a busy-wait loop from writeback threads
                 * and kswapd activity, but those code paths have their own
                 * higher-level throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
                        mark_buffer_async_write_endio(bh, handler);
                } else {
                        unlock_buffer(bh);
                }
        } while ((bh = bh->b_this_page) != head);

        /*
         * The page and its buffers are protected by PageWriteback(), so we can
         * drop the bh refcounts early.
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
                                        inode->i_write_hint, wbc);
                        nr_underway++;
                }
                bh = next;
        } while (bh != head);
        unlock_page(page);

        err = 0;
done:
        if (nr_underway == 0) {
                /*
                 * The page was marked dirty, but the buffers were
                 * clean. Someone wrote them back by hand with
                 * ll_rw_block/submit_bh. A rare case.
                 */
                end_page_writeback(page);

                /*
                 * The page and buffer_heads can be released at any time from
                 * here on.
                 */
        }
        return err;

recover:
        /*
         * ENOSPC, or some other error. We may already have added some
         * blocks to the file, so we need to write these out to avoid
         * exposing stale data.
         * The page is currently locked and not marked for writeback
         */
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
                if (buffer_mapped(bh) && buffer_dirty(bh) &&
                    !buffer_delay(bh)) {
                        lock_buffer(bh);
                        mark_buffer_async_write_endio(bh, handler);
                } else {
                        /*
                         * The buffer may have been set dirty during
                         * attachment to a dirty page.
                         */
                        clear_buffer_dirty(bh);
                }
        } while ((bh = bh->b_this_page) != head);
        SetPageError(page);
        BUG_ON(PageWriteback(page));
        mapping_set_error(page->mapping, err);
        set_page_writeback(page);
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
                        submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
                                        inode->i_write_hint, wbc);
                        nr_underway++;
                }
                bh = next;
        } while (bh != head);
        unlock_page(page);
        goto done;
}
EXPORT_SYMBOL(__block_write_full_page);

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
        unsigned int block_start, block_end;
        struct buffer_head *head, *bh;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        block_start = 0;
        do {
                block_end = block_start + bh->b_size;

                if (buffer_new(bh)) {
                        if (block_end > from && block_start < to) {
                                if (!PageUptodate(page)) {
                                        unsigned start, size;

                                        start = max(from, block_start);
                                        size = min(to, block_end) - start;

                                        zero_user(page, start, size);
                                        set_buffer_uptodate(bh);
                                }

                                clear_buffer_new(bh);
                                mark_buffer_dirty(bh);
                        }
                }

                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);

1947 */ 1948 BUG_ON(offset >= iomap->offset + iomap->length); 1949 1950 switch (iomap->type) { 1951 case IOMAP_HOLE: 1952 /* 1953 * If the buffer is not up to date or beyond the current EOF, 1954 * we need to mark it as new to ensure sub-block zeroing is 1955 * executed if necessary. 1956 */ 1957 if (!buffer_uptodate(bh) || 1958 (offset >= i_size_read(inode))) 1959 set_buffer_new(bh); 1960 break; 1961 case IOMAP_DELALLOC: 1962 if (!buffer_uptodate(bh) || 1963 (offset >= i_size_read(inode))) 1964 set_buffer_new(bh); 1965 set_buffer_uptodate(bh); 1966 set_buffer_mapped(bh); 1967 set_buffer_delay(bh); 1968 break; 1969 case IOMAP_UNWRITTEN: 1970 /* 1971 * For unwritten regions, we always need to ensure that 1972 * sub-block writes cause the regions in the block we are not 1973 * writing to are zeroed. Set the buffer as new to ensure this. 1974 */ 1975 set_buffer_new(bh); 1976 set_buffer_unwritten(bh); 1977 /* FALLTHRU */ 1978 case IOMAP_MAPPED: 1979 if (offset >= i_size_read(inode)) 1980 set_buffer_new(bh); 1981 bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) + 1982 ((offset - iomap->offset) >> inode->i_blkbits); 1983 set_buffer_mapped(bh); 1984 break; 1985 } 1986 } 1987 1988 int __block_write_begin_int(struct page *page, loff_t pos, unsigned len, 1989 get_block_t *get_block, struct iomap *iomap) 1990 { 1991 unsigned from = pos & (PAGE_SIZE - 1); 1992 unsigned to = from + len; 1993 struct inode *inode = page->mapping->host; 1994 unsigned block_start, block_end; 1995 sector_t block; 1996 int err = 0; 1997 unsigned blocksize, bbits; 1998 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 1999 2000 BUG_ON(!PageLocked(page)); 2001 BUG_ON(from > PAGE_SIZE); 2002 BUG_ON(to > PAGE_SIZE); 2003 BUG_ON(from > to); 2004 2005 head = create_page_buffers(page, inode, 0); 2006 blocksize = head->b_size; 2007 bbits = block_size_bits(blocksize); 2008 2009 block = (sector_t)page->index << (PAGE_SHIFT - bbits); 2010 2011 for(bh = head, block_start = 0; bh != head || !block_start; 2012 block++, block_start=block_end, bh = bh->b_this_page) { 2013 block_end = block_start + blocksize; 2014 if (block_end <= from || block_start >= to) { 2015 if (PageUptodate(page)) { 2016 if (!buffer_uptodate(bh)) 2017 set_buffer_uptodate(bh); 2018 } 2019 continue; 2020 } 2021 if (buffer_new(bh)) 2022 clear_buffer_new(bh); 2023 if (!buffer_mapped(bh)) { 2024 WARN_ON(bh->b_size != blocksize); 2025 if (get_block) { 2026 err = get_block(inode, block, bh, 1); 2027 if (err) 2028 break; 2029 } else { 2030 iomap_to_bh(inode, block, bh, iomap); 2031 } 2032 2033 if (buffer_new(bh)) { 2034 clean_bdev_bh_alias(bh); 2035 if (PageUptodate(page)) { 2036 clear_buffer_new(bh); 2037 set_buffer_uptodate(bh); 2038 mark_buffer_dirty(bh); 2039 continue; 2040 } 2041 if (block_end > to || block_start < from) 2042 zero_user_segments(page, 2043 to, block_end, 2044 block_start, from); 2045 continue; 2046 } 2047 } 2048 if (PageUptodate(page)) { 2049 if (!buffer_uptodate(bh)) 2050 set_buffer_uptodate(bh); 2051 continue; 2052 } 2053 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 2054 !buffer_unwritten(bh) && 2055 (block_start < from || block_end > to)) { 2056 ll_rw_block(REQ_OP_READ, 0, 1, &bh); 2057 *wait_bh++=bh; 2058 } 2059 } 2060 /* 2061 * If we issued read requests - let them complete. 
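 *
 * At most two reads can be pending here, which is why 'wait' has only
 * two entries: a read is issued only for a block that straddles
 * 'from' or 'to', and a page holds at most one of each.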
2062 */ 2063 while(wait_bh > wait) { 2064 wait_on_buffer(*--wait_bh); 2065 if (!buffer_uptodate(*wait_bh)) 2066 err = -EIO; 2067 } 2068 if (unlikely(err)) 2069 page_zero_new_buffers(page, from, to); 2070 return err; 2071 } 2072 2073 int __block_write_begin(struct page *page, loff_t pos, unsigned len, 2074 get_block_t *get_block) 2075 { 2076 return __block_write_begin_int(page, pos, len, get_block, NULL); 2077 } 2078 EXPORT_SYMBOL(__block_write_begin); 2079 2080 static int __block_commit_write(struct inode *inode, struct page *page, 2081 unsigned from, unsigned to) 2082 { 2083 unsigned block_start, block_end; 2084 int partial = 0; 2085 unsigned blocksize; 2086 struct buffer_head *bh, *head; 2087 2088 bh = head = page_buffers(page); 2089 blocksize = bh->b_size; 2090 2091 block_start = 0; 2092 do { 2093 block_end = block_start + blocksize; 2094 if (block_end <= from || block_start >= to) { 2095 if (!buffer_uptodate(bh)) 2096 partial = 1; 2097 } else { 2098 set_buffer_uptodate(bh); 2099 mark_buffer_dirty(bh); 2100 } 2101 clear_buffer_new(bh); 2102 2103 block_start = block_end; 2104 bh = bh->b_this_page; 2105 } while (bh != head); 2106 2107 /* 2108 * If this is a partial write which happened to make all buffers 2109 * uptodate then we can optimize away a bogus readpage() for 2110 * the next read(). Here we 'discover' whether the page went 2111 * uptodate as a result of this (potentially partial) write. 2112 */ 2113 if (!partial) 2114 SetPageUptodate(page); 2115 return 0; 2116 } 2117 2118 /* 2119 * block_write_begin takes care of the basic task of block allocation and 2120 * bringing partial write blocks uptodate first. 2121 * 2122 * The filesystem needs to handle block truncation upon failure. 2123 */ 2124 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2125 unsigned flags, struct page **pagep, get_block_t *get_block) 2126 { 2127 pgoff_t index = pos >> PAGE_SHIFT; 2128 struct page *page; 2129 int status; 2130 2131 page = grab_cache_page_write_begin(mapping, index, flags); 2132 if (!page) 2133 return -ENOMEM; 2134 2135 status = __block_write_begin(page, pos, len, get_block); 2136 if (unlikely(status)) { 2137 unlock_page(page); 2138 put_page(page); 2139 page = NULL; 2140 } 2141 2142 *pagep = page; 2143 return status; 2144 } 2145 EXPORT_SYMBOL(block_write_begin); 2146 2147 int block_write_end(struct file *file, struct address_space *mapping, 2148 loff_t pos, unsigned len, unsigned copied, 2149 struct page *page, void *fsdata) 2150 { 2151 struct inode *inode = mapping->host; 2152 unsigned start; 2153 2154 start = pos & (PAGE_SIZE - 1); 2155 2156 if (unlikely(copied < len)) { 2157 /* 2158 * The buffers that were written will now be uptodate, so we 2159 * don't have to worry about a readpage reading them and 2160 * overwriting a partial write. However if we have encountered 2161 * a short write and only partially written into a buffer, it 2162 * will not be marked uptodate, so a readpage might come in and 2163 * destroy our partial write. 2164 * 2165 * Do the simplest thing, and just treat any short write to a 2166 * non uptodate page as a zero-length write, and force the 2167 * caller to redo the whole thing. 
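 *
 * (A caller such as generic_perform_write() notices the shortened
 * return value and retries the copy for the remaining bytes, so
 * nothing is silently lost.)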
2168 */ 2169 if (!PageUptodate(page)) 2170 copied = 0; 2171 2172 page_zero_new_buffers(page, start+copied, start+len); 2173 } 2174 flush_dcache_page(page); 2175 2176 /* This could be a short (even 0-length) commit */ 2177 __block_commit_write(inode, page, start, start+copied); 2178 2179 return copied; 2180 } 2181 EXPORT_SYMBOL(block_write_end); 2182 2183 int generic_write_end(struct file *file, struct address_space *mapping, 2184 loff_t pos, unsigned len, unsigned copied, 2185 struct page *page, void *fsdata) 2186 { 2187 struct inode *inode = mapping->host; 2188 loff_t old_size = inode->i_size; 2189 int i_size_changed = 0; 2190 2191 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2192 2193 /* 2194 * No need to use i_size_read() here, the i_size 2195 * cannot change under us because we hold i_mutex. 2196 * 2197 * But it's important to update i_size while still holding page lock: 2198 * page writeout could otherwise come in and zero beyond i_size. 2199 */ 2200 if (pos+copied > inode->i_size) { 2201 i_size_write(inode, pos+copied); 2202 i_size_changed = 1; 2203 } 2204 2205 unlock_page(page); 2206 put_page(page); 2207 2208 if (old_size < pos) 2209 pagecache_isize_extended(inode, old_size, pos); 2210 /* 2211 * Don't mark the inode dirty under page lock. First, it unnecessarily 2212 * makes the holding time of page lock longer. Second, it forces lock 2213 * ordering of page lock and transaction start for journaling 2214 * filesystems. 2215 */ 2216 if (i_size_changed) 2217 mark_inode_dirty(inode); 2218 2219 return copied; 2220 } 2221 EXPORT_SYMBOL(generic_write_end); 2222 2223 /* 2224 * block_is_partially_uptodate checks whether buffers within a page are 2225 * uptodate or not. 2226 * 2227 * Returns true if all buffers which correspond to a file portion 2228 * we want to read are uptodate. 2229 */ 2230 int block_is_partially_uptodate(struct page *page, unsigned long from, 2231 unsigned long count) 2232 { 2233 unsigned block_start, block_end, blocksize; 2234 unsigned to; 2235 struct buffer_head *bh, *head; 2236 int ret = 1; 2237 2238 if (!page_has_buffers(page)) 2239 return 0; 2240 2241 head = page_buffers(page); 2242 blocksize = head->b_size; 2243 to = min_t(unsigned, PAGE_SIZE - from, count); 2244 to = from + to; 2245 if (from < blocksize && to > PAGE_SIZE - blocksize) 2246 return 0; 2247 2248 bh = head; 2249 block_start = 0; 2250 do { 2251 block_end = block_start + blocksize; 2252 if (block_end > from && block_start < to) { 2253 if (!buffer_uptodate(bh)) { 2254 ret = 0; 2255 break; 2256 } 2257 if (block_end >= to) 2258 break; 2259 } 2260 block_start = block_end; 2261 bh = bh->b_this_page; 2262 } while (bh != head); 2263 2264 return ret; 2265 } 2266 EXPORT_SYMBOL(block_is_partially_uptodate); 2267 2268 /* 2269 * Generic "read page" function for block devices that have the normal 2270 * get_block functionality. This is most of the block device filesystems. 2271 * Reads the page asynchronously --- the unlock_buffer() and 2272 * set/clear_buffer_uptodate() functions propagate buffer state into the 2273 * page struct once IO has completed. 
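 *
 * A filesystem's ->readpage usually just wraps this function. As a
 * sketch, with foo_get_block standing in for the fs-specific block
 * mapper:
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, foo_get_block);
 *	}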
2274 */ 2275 int block_read_full_page(struct page *page, get_block_t *get_block) 2276 { 2277 struct inode *inode = page->mapping->host; 2278 sector_t iblock, lblock; 2279 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 2280 unsigned int blocksize, bbits; 2281 int nr, i; 2282 int fully_mapped = 1; 2283 2284 head = create_page_buffers(page, inode, 0); 2285 blocksize = head->b_size; 2286 bbits = block_size_bits(blocksize); 2287 2288 iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); 2289 lblock = (i_size_read(inode)+blocksize-1) >> bbits; 2290 bh = head; 2291 nr = 0; 2292 i = 0; 2293 2294 do { 2295 if (buffer_uptodate(bh)) 2296 continue; 2297 2298 if (!buffer_mapped(bh)) { 2299 int err = 0; 2300 2301 fully_mapped = 0; 2302 if (iblock < lblock) { 2303 WARN_ON(bh->b_size != blocksize); 2304 err = get_block(inode, iblock, bh, 0); 2305 if (err) 2306 SetPageError(page); 2307 } 2308 if (!buffer_mapped(bh)) { 2309 zero_user(page, i * blocksize, blocksize); 2310 if (!err) 2311 set_buffer_uptodate(bh); 2312 continue; 2313 } 2314 /* 2315 * get_block() might have updated the buffer 2316 * synchronously 2317 */ 2318 if (buffer_uptodate(bh)) 2319 continue; 2320 } 2321 arr[nr++] = bh; 2322 } while (i++, iblock++, (bh = bh->b_this_page) != head); 2323 2324 if (fully_mapped) 2325 SetPageMappedToDisk(page); 2326 2327 if (!nr) { 2328 /* 2329 * All buffers are uptodate - we can set the page uptodate 2330 * as well. But not if get_block() returned an error. 2331 */ 2332 if (!PageError(page)) 2333 SetPageUptodate(page); 2334 unlock_page(page); 2335 return 0; 2336 } 2337 2338 /* Stage two: lock the buffers */ 2339 for (i = 0; i < nr; i++) { 2340 bh = arr[i]; 2341 lock_buffer(bh); 2342 mark_buffer_async_read(bh); 2343 } 2344 2345 /* 2346 * Stage 3: start the IO. Check for uptodateness 2347 * inside the buffer lock in case another process reading 2348 * the underlying blockdev brought it uptodate (the sct fix). 2349 */ 2350 for (i = 0; i < nr; i++) { 2351 bh = arr[i]; 2352 if (buffer_uptodate(bh)) 2353 end_buffer_async_read(bh, 1); 2354 else 2355 submit_bh(REQ_OP_READ, 0, bh); 2356 } 2357 return 0; 2358 } 2359 EXPORT_SYMBOL(block_read_full_page); 2360 2361 /* utility function for filesystems that need to do work on expanding 2362 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2363 * deal with the hole. 
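 *
 * A typical caller is a filesystem's ->setattr when the file size is
 * being increased. As a sketch:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
 *		err = generic_cont_expand_simple(inode, attr->ia_size);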
2364  */
2365 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2366 {
2367 	struct address_space *mapping = inode->i_mapping;
2368 	struct page *page;
2369 	void *fsdata;
2370 	int err;
2371 
2372 	err = inode_newsize_ok(inode, size);
2373 	if (err)
2374 		goto out;
2375 
2376 	err = pagecache_write_begin(NULL, mapping, size, 0,
2377 				    AOP_FLAG_CONT_EXPAND, &page, &fsdata);
2378 	if (err)
2379 		goto out;
2380 
2381 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2382 	BUG_ON(err > 0);
2383 
2384 out:
2385 	return err;
2386 }
2387 EXPORT_SYMBOL(generic_cont_expand_simple);
2388 
2389 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2390 			    loff_t pos, loff_t *bytes)
2391 {
2392 	struct inode *inode = mapping->host;
2393 	unsigned int blocksize = i_blocksize(inode);
2394 	struct page *page;
2395 	void *fsdata;
2396 	pgoff_t index, curidx;
2397 	loff_t curpos;
2398 	unsigned zerofrom, offset, len;
2399 	int err = 0;
2400 
2401 	index = pos >> PAGE_SHIFT;
2402 	offset = pos & ~PAGE_MASK;
2403 
2404 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2405 		zerofrom = curpos & ~PAGE_MASK;
2406 		if (zerofrom & (blocksize-1)) {
2407 			*bytes |= (blocksize-1);
2408 			(*bytes)++;
2409 		}
2410 		len = PAGE_SIZE - zerofrom;
2411 
2412 		err = pagecache_write_begin(file, mapping, curpos, len, 0,
2413 					    &page, &fsdata);
2414 		if (err)
2415 			goto out;
2416 		zero_user(page, zerofrom, len);
2417 		err = pagecache_write_end(file, mapping, curpos, len, len,
2418 					  page, fsdata);
2419 		if (err < 0)
2420 			goto out;
2421 		BUG_ON(err != len);
2422 		err = 0;
2423 
2424 		balance_dirty_pages_ratelimited(mapping);
2425 
2426 		if (unlikely(fatal_signal_pending(current))) {
2427 			err = -EINTR;
2428 			goto out;
2429 		}
2430 	}
2431 
2432 	/* page covers the boundary, find the boundary offset */
2433 	if (index == curidx) {
2434 		zerofrom = curpos & ~PAGE_MASK;
2435 		/* if we expand the file, the last block will be filled */
2436 		if (offset <= zerofrom) {
2437 			goto out;
2438 		}
2439 		if (zerofrom & (blocksize-1)) {
2440 			*bytes |= (blocksize-1);
2441 			(*bytes)++;
2442 		}
2443 		len = offset - zerofrom;
2444 
2445 		err = pagecache_write_begin(file, mapping, curpos, len, 0,
2446 					    &page, &fsdata);
2447 		if (err)
2448 			goto out;
2449 		zero_user(page, zerofrom, len);
2450 		err = pagecache_write_end(file, mapping, curpos, len, len,
2451 					  page, fsdata);
2452 		if (err < 0)
2453 			goto out;
2454 		BUG_ON(err != len);
2455 		err = 0;
2456 	}
2457 out:
2458 	return err;
2459 }
2460 
2461 /*
2462  * For moronic filesystems that do not allow holes in files.
2463  * We may have to extend the file.
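 *
 * @bytes points at the filesystem's private "zeroed and written up to
 * here" watermark; FAT, for instance, passes
 * &MSDOS_I(inode)->mmu_private here.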
2464 */ 2465 int cont_write_begin(struct file *file, struct address_space *mapping, 2466 loff_t pos, unsigned len, unsigned flags, 2467 struct page **pagep, void **fsdata, 2468 get_block_t *get_block, loff_t *bytes) 2469 { 2470 struct inode *inode = mapping->host; 2471 unsigned int blocksize = i_blocksize(inode); 2472 unsigned int zerofrom; 2473 int err; 2474 2475 err = cont_expand_zero(file, mapping, pos, bytes); 2476 if (err) 2477 return err; 2478 2479 zerofrom = *bytes & ~PAGE_MASK; 2480 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2481 *bytes |= (blocksize-1); 2482 (*bytes)++; 2483 } 2484 2485 return block_write_begin(mapping, pos, len, flags, pagep, get_block); 2486 } 2487 EXPORT_SYMBOL(cont_write_begin); 2488 2489 int block_commit_write(struct page *page, unsigned from, unsigned to) 2490 { 2491 struct inode *inode = page->mapping->host; 2492 __block_commit_write(inode,page,from,to); 2493 return 0; 2494 } 2495 EXPORT_SYMBOL(block_commit_write); 2496 2497 /* 2498 * block_page_mkwrite() is not allowed to change the file size as it gets 2499 * called from a page fault handler when a page is first dirtied. Hence we must 2500 * be careful to check for EOF conditions here. We set the page up correctly 2501 * for a written page which means we get ENOSPC checking when writing into 2502 * holes and correct delalloc and unwritten extent mapping on filesystems that 2503 * support these features. 2504 * 2505 * We are not allowed to take the i_mutex here so we have to play games to 2506 * protect against truncate races as the page could now be beyond EOF. Because 2507 * truncate writes the inode size before removing pages, once we have the 2508 * page lock we can determine safely if the page is beyond EOF. If it is not 2509 * beyond EOF, then the page is guaranteed safe against truncation until we 2510 * unlock the page. 2511 * 2512 * Direct callers of this function should protect against filesystem freezing 2513 * using sb_start_pagefault() - sb_end_pagefault() functions. 2514 */ 2515 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2516 get_block_t get_block) 2517 { 2518 struct page *page = vmf->page; 2519 struct inode *inode = file_inode(vma->vm_file); 2520 unsigned long end; 2521 loff_t size; 2522 int ret; 2523 2524 lock_page(page); 2525 size = i_size_read(inode); 2526 if ((page->mapping != inode->i_mapping) || 2527 (page_offset(page) > size)) { 2528 /* We overload EFAULT to mean page got truncated */ 2529 ret = -EFAULT; 2530 goto out_unlock; 2531 } 2532 2533 /* page is wholly or partially inside EOF */ 2534 if (((page->index + 1) << PAGE_SHIFT) > size) 2535 end = size & ~PAGE_MASK; 2536 else 2537 end = PAGE_SIZE; 2538 2539 ret = __block_write_begin(page, 0, end, get_block); 2540 if (!ret) 2541 ret = block_commit_write(page, 0, end); 2542 2543 if (unlikely(ret < 0)) 2544 goto out_unlock; 2545 set_page_dirty(page); 2546 wait_for_stable_page(page); 2547 return 0; 2548 out_unlock: 2549 unlock_page(page); 2550 return ret; 2551 } 2552 EXPORT_SYMBOL(block_page_mkwrite); 2553 2554 /* 2555 * nobh_write_begin()'s prereads are special: the buffer_heads are freed 2556 * immediately, while under the page lock. So it needs a special end_io 2557 * handler which does not touch the bh after unlocking it. 
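 *
 * (end_buffer_read_sync() would put_bh() after unlocking, but by then
 * the waiter in nobh_write_begin() may already have freed the buffer;
 * hence the _notouch wrapper below.)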
2558 */ 2559 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) 2560 { 2561 __end_buffer_read_notouch(bh, uptodate); 2562 } 2563 2564 /* 2565 * Attach the singly-linked list of buffers created by nobh_write_begin, to 2566 * the page (converting it to circular linked list and taking care of page 2567 * dirty races). 2568 */ 2569 static void attach_nobh_buffers(struct page *page, struct buffer_head *head) 2570 { 2571 struct buffer_head *bh; 2572 2573 BUG_ON(!PageLocked(page)); 2574 2575 spin_lock(&page->mapping->private_lock); 2576 bh = head; 2577 do { 2578 if (PageDirty(page)) 2579 set_buffer_dirty(bh); 2580 if (!bh->b_this_page) 2581 bh->b_this_page = head; 2582 bh = bh->b_this_page; 2583 } while (bh != head); 2584 attach_page_buffers(page, head); 2585 spin_unlock(&page->mapping->private_lock); 2586 } 2587 2588 /* 2589 * On entry, the page is fully not uptodate. 2590 * On exit the page is fully uptodate in the areas outside (from,to) 2591 * The filesystem needs to handle block truncation upon failure. 2592 */ 2593 int nobh_write_begin(struct address_space *mapping, 2594 loff_t pos, unsigned len, unsigned flags, 2595 struct page **pagep, void **fsdata, 2596 get_block_t *get_block) 2597 { 2598 struct inode *inode = mapping->host; 2599 const unsigned blkbits = inode->i_blkbits; 2600 const unsigned blocksize = 1 << blkbits; 2601 struct buffer_head *head, *bh; 2602 struct page *page; 2603 pgoff_t index; 2604 unsigned from, to; 2605 unsigned block_in_page; 2606 unsigned block_start, block_end; 2607 sector_t block_in_file; 2608 int nr_reads = 0; 2609 int ret = 0; 2610 int is_mapped_to_disk = 1; 2611 2612 index = pos >> PAGE_SHIFT; 2613 from = pos & (PAGE_SIZE - 1); 2614 to = from + len; 2615 2616 page = grab_cache_page_write_begin(mapping, index, flags); 2617 if (!page) 2618 return -ENOMEM; 2619 *pagep = page; 2620 *fsdata = NULL; 2621 2622 if (page_has_buffers(page)) { 2623 ret = __block_write_begin(page, pos, len, get_block); 2624 if (unlikely(ret)) 2625 goto out_release; 2626 return ret; 2627 } 2628 2629 if (PageMappedToDisk(page)) 2630 return 0; 2631 2632 /* 2633 * Allocate buffers so that we can keep track of state, and potentially 2634 * attach them to the page if an error occurs. In the common case of 2635 * no error, they will just be freed again without ever being attached 2636 * to the page (which is all OK, because we're under the page lock). 2637 * 2638 * Be careful: the buffer linked list is a NULL terminated one, rather 2639 * than the circular one we're used to. 2640 */ 2641 head = alloc_page_buffers(page, blocksize, 0); 2642 if (!head) { 2643 ret = -ENOMEM; 2644 goto out_release; 2645 } 2646 2647 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); 2648 2649 /* 2650 * We loop across all blocks in the page, whether or not they are 2651 * part of the affected region. This is so we can discover if the 2652 * page is fully mapped-to-disk. 
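 *
 * Blocks at or beyond 'to' are probed with create == 0, so we learn
 * whether they are mapped without allocating anything outside the
 * region actually being written.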
2653  */
2654 	for (block_start = 0, block_in_page = 0, bh = head;
2655 	     block_start < PAGE_SIZE;
2656 	     block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2657 		int create;
2658 
2659 		block_end = block_start + blocksize;
2660 		bh->b_state = 0;
2661 		create = 1;
2662 		if (block_start >= to)
2663 			create = 0;
2664 		ret = get_block(inode, block_in_file + block_in_page,
2665 					bh, create);
2666 		if (ret)
2667 			goto failed;
2668 		if (!buffer_mapped(bh))
2669 			is_mapped_to_disk = 0;
2670 		if (buffer_new(bh))
2671 			clean_bdev_bh_alias(bh);
2672 		if (PageUptodate(page)) {
2673 			set_buffer_uptodate(bh);
2674 			continue;
2675 		}
2676 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2677 			zero_user_segments(page, block_start, from,
2678 							to, block_end);
2679 			continue;
2680 		}
2681 		if (buffer_uptodate(bh))
2682 			continue;	/* reiserfs does this */
2683 		if (block_start < from || block_end > to) {
2684 			lock_buffer(bh);
2685 			bh->b_end_io = end_buffer_read_nobh;
2686 			submit_bh(REQ_OP_READ, 0, bh);
2687 			nr_reads++;
2688 		}
2689 	}
2690 
2691 	if (nr_reads) {
2692 		/*
2693 		 * The page is locked, so these buffers are protected from
2694 		 * any VM or truncate activity. Hence we don't need to care
2695 		 * about the buffer_head refcounts.
2696 		 */
2697 		for (bh = head; bh; bh = bh->b_this_page) {
2698 			wait_on_buffer(bh);
2699 			if (!buffer_uptodate(bh))
2700 				ret = -EIO;
2701 		}
2702 		if (ret)
2703 			goto failed;
2704 	}
2705 
2706 	if (is_mapped_to_disk)
2707 		SetPageMappedToDisk(page);
2708 
2709 	*fsdata = head; /* to be released by nobh_write_end */
2710 
2711 	return 0;
2712 
2713 failed:
2714 	BUG_ON(!ret);
2715 	/*
2716 	 * Error recovery is a bit difficult. We need to zero out blocks that
2717 	 * were newly allocated, and dirty them to ensure they get written out.
2718 	 * Buffers need to be attached to the page at this point, otherwise
2719 	 * the handling of potential IO errors during writeout would be hard
2720 	 * (could try doing synchronous writeout, but what if that fails too?)
2721 	 */
2722 	attach_nobh_buffers(page, head);
2723 	page_zero_new_buffers(page, from, to);
2724 
2725 out_release:
2726 	unlock_page(page);
2727 	put_page(page);
2728 	*pagep = NULL;
2729 
2730 	return ret;
2731 }
2732 EXPORT_SYMBOL(nobh_write_begin);
2733 
2734 int nobh_write_end(struct file *file, struct address_space *mapping,
2735 			loff_t pos, unsigned len, unsigned copied,
2736 			struct page *page, void *fsdata)
2737 {
2738 	struct inode *inode = page->mapping->host;
2739 	struct buffer_head *head = fsdata;
2740 	struct buffer_head *bh;
2741 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2742 
2743 	if (unlikely(copied < len) && head)
2744 		attach_nobh_buffers(page, head);
2745 	if (page_has_buffers(page))
2746 		return generic_write_end(file, mapping, pos, len,
2747 					copied, page, fsdata);
2748 
2749 	SetPageUptodate(page);
2750 	set_page_dirty(page);
2751 	if (pos+copied > inode->i_size) {
2752 		i_size_write(inode, pos+copied);
2753 		mark_inode_dirty(inode);
2754 	}
2755 
2756 	unlock_page(page);
2757 	put_page(page);
2758 
2759 	while (head) {
2760 		bh = head;
2761 		head = head->b_this_page;
2762 		free_buffer_head(bh);
2763 	}
2764 
2765 	return copied;
2766 }
2767 EXPORT_SYMBOL(nobh_write_end);
2768 
2769 /*
2770  * nobh_writepage() - based on block_write_full_page() except
2771  * that it tries to operate without attaching bufferheads to
2772  * the page.
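 *
 * mpage_writepage() is tried first; it returns -EAGAIN for the cases
 * it cannot handle, and only then do we fall back to the buffer-head
 * based __block_write_full_page() path.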
2773 */ 2774 int nobh_writepage(struct page *page, get_block_t *get_block, 2775 struct writeback_control *wbc) 2776 { 2777 struct inode * const inode = page->mapping->host; 2778 loff_t i_size = i_size_read(inode); 2779 const pgoff_t end_index = i_size >> PAGE_SHIFT; 2780 unsigned offset; 2781 int ret; 2782 2783 /* Is the page fully inside i_size? */ 2784 if (page->index < end_index) 2785 goto out; 2786 2787 /* Is the page fully outside i_size? (truncate in progress) */ 2788 offset = i_size & (PAGE_SIZE-1); 2789 if (page->index >= end_index+1 || !offset) { 2790 /* 2791 * The page may have dirty, unmapped buffers. For example, 2792 * they may have been added in ext3_writepage(). Make them 2793 * freeable here, so the page does not leak. 2794 */ 2795 #if 0 2796 /* Not really sure about this - do we need this ? */ 2797 if (page->mapping->a_ops->invalidatepage) 2798 page->mapping->a_ops->invalidatepage(page, offset); 2799 #endif 2800 unlock_page(page); 2801 return 0; /* don't care */ 2802 } 2803 2804 /* 2805 * The page straddles i_size. It must be zeroed out on each and every 2806 * writepage invocation because it may be mmapped. "A file is mapped 2807 * in multiples of the page size. For a file that is not a multiple of 2808 * the page size, the remaining memory is zeroed when mapped, and 2809 * writes to that region are not written out to the file." 2810 */ 2811 zero_user_segment(page, offset, PAGE_SIZE); 2812 out: 2813 ret = mpage_writepage(page, get_block, wbc); 2814 if (ret == -EAGAIN) 2815 ret = __block_write_full_page(inode, page, get_block, wbc, 2816 end_buffer_async_write); 2817 return ret; 2818 } 2819 EXPORT_SYMBOL(nobh_writepage); 2820 2821 int nobh_truncate_page(struct address_space *mapping, 2822 loff_t from, get_block_t *get_block) 2823 { 2824 pgoff_t index = from >> PAGE_SHIFT; 2825 unsigned offset = from & (PAGE_SIZE-1); 2826 unsigned blocksize; 2827 sector_t iblock; 2828 unsigned length, pos; 2829 struct inode *inode = mapping->host; 2830 struct page *page; 2831 struct buffer_head map_bh; 2832 int err; 2833 2834 blocksize = i_blocksize(inode); 2835 length = offset & (blocksize - 1); 2836 2837 /* Block boundary? Nothing to do */ 2838 if (!length) 2839 return 0; 2840 2841 length = blocksize - length; 2842 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 2843 2844 page = grab_cache_page(mapping, index); 2845 err = -ENOMEM; 2846 if (!page) 2847 goto out; 2848 2849 if (page_has_buffers(page)) { 2850 has_buffers: 2851 unlock_page(page); 2852 put_page(page); 2853 return block_truncate_page(mapping, from, get_block); 2854 } 2855 2856 /* Find the buffer that contains "offset" */ 2857 pos = blocksize; 2858 while (offset >= pos) { 2859 iblock++; 2860 pos += blocksize; 2861 } 2862 2863 map_bh.b_size = blocksize; 2864 map_bh.b_state = 0; 2865 err = get_block(inode, iblock, &map_bh, 0); 2866 if (err) 2867 goto unlock; 2868 /* unmapped? It's a hole - nothing to do */ 2869 if (!buffer_mapped(&map_bh)) 2870 goto unlock; 2871 2872 /* Ok, it's mapped. 
Make sure it's up-to-date */ 2873 if (!PageUptodate(page)) { 2874 err = mapping->a_ops->readpage(NULL, page); 2875 if (err) { 2876 put_page(page); 2877 goto out; 2878 } 2879 lock_page(page); 2880 if (!PageUptodate(page)) { 2881 err = -EIO; 2882 goto unlock; 2883 } 2884 if (page_has_buffers(page)) 2885 goto has_buffers; 2886 } 2887 zero_user(page, offset, length); 2888 set_page_dirty(page); 2889 err = 0; 2890 2891 unlock: 2892 unlock_page(page); 2893 put_page(page); 2894 out: 2895 return err; 2896 } 2897 EXPORT_SYMBOL(nobh_truncate_page); 2898 2899 int block_truncate_page(struct address_space *mapping, 2900 loff_t from, get_block_t *get_block) 2901 { 2902 pgoff_t index = from >> PAGE_SHIFT; 2903 unsigned offset = from & (PAGE_SIZE-1); 2904 unsigned blocksize; 2905 sector_t iblock; 2906 unsigned length, pos; 2907 struct inode *inode = mapping->host; 2908 struct page *page; 2909 struct buffer_head *bh; 2910 int err; 2911 2912 blocksize = i_blocksize(inode); 2913 length = offset & (blocksize - 1); 2914 2915 /* Block boundary? Nothing to do */ 2916 if (!length) 2917 return 0; 2918 2919 length = blocksize - length; 2920 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 2921 2922 page = grab_cache_page(mapping, index); 2923 err = -ENOMEM; 2924 if (!page) 2925 goto out; 2926 2927 if (!page_has_buffers(page)) 2928 create_empty_buffers(page, blocksize, 0); 2929 2930 /* Find the buffer that contains "offset" */ 2931 bh = page_buffers(page); 2932 pos = blocksize; 2933 while (offset >= pos) { 2934 bh = bh->b_this_page; 2935 iblock++; 2936 pos += blocksize; 2937 } 2938 2939 err = 0; 2940 if (!buffer_mapped(bh)) { 2941 WARN_ON(bh->b_size != blocksize); 2942 err = get_block(inode, iblock, bh, 0); 2943 if (err) 2944 goto unlock; 2945 /* unmapped? It's a hole - nothing to do */ 2946 if (!buffer_mapped(bh)) 2947 goto unlock; 2948 } 2949 2950 /* Ok, it's mapped. Make sure it's up-to-date */ 2951 if (PageUptodate(page)) 2952 set_buffer_uptodate(bh); 2953 2954 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2955 err = -EIO; 2956 ll_rw_block(REQ_OP_READ, 0, 1, &bh); 2957 wait_on_buffer(bh); 2958 /* Uhhuh. Read error. Complain and punt. */ 2959 if (!buffer_uptodate(bh)) 2960 goto unlock; 2961 } 2962 2963 zero_user(page, offset, length); 2964 mark_buffer_dirty(bh); 2965 err = 0; 2966 2967 unlock: 2968 unlock_page(page); 2969 put_page(page); 2970 out: 2971 return err; 2972 } 2973 EXPORT_SYMBOL(block_truncate_page); 2974 2975 /* 2976 * The generic ->writepage function for buffer-backed address_spaces 2977 */ 2978 int block_write_full_page(struct page *page, get_block_t *get_block, 2979 struct writeback_control *wbc) 2980 { 2981 struct inode * const inode = page->mapping->host; 2982 loff_t i_size = i_size_read(inode); 2983 const pgoff_t end_index = i_size >> PAGE_SHIFT; 2984 unsigned offset; 2985 2986 /* Is the page fully inside i_size? */ 2987 if (page->index < end_index) 2988 return __block_write_full_page(inode, page, get_block, wbc, 2989 end_buffer_async_write); 2990 2991 /* Is the page fully outside i_size? (truncate in progress) */ 2992 offset = i_size & (PAGE_SIZE-1); 2993 if (page->index >= end_index+1 || !offset) { 2994 /* 2995 * The page may have dirty, unmapped buffers. For example, 2996 * they may have been added in ext3_writepage(). Make them 2997 * freeable here, so the page does not leak. 2998 */ 2999 do_invalidatepage(page, 0, PAGE_SIZE); 3000 unlock_page(page); 3001 return 0; /* don't care */ 3002 } 3003 3004 /* 3005 * The page straddles i_size. 
It must be zeroed out on each and every
3006  * writepage invocation because it may be mmapped. "A file is mapped
3007  * in multiples of the page size. For a file that is not a multiple of
3008  * the page size, the remaining memory is zeroed when mapped, and
3009  * writes to that region are not written out to the file."
3010  */
3011 	zero_user_segment(page, offset, PAGE_SIZE);
3012 	return __block_write_full_page(inode, page, get_block, wbc,
3013 							end_buffer_async_write);
3014 }
3015 EXPORT_SYMBOL(block_write_full_page);
3016 
3017 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
3018 			    get_block_t *get_block)
3019 {
3020 	struct inode *inode = mapping->host;
3021 	struct buffer_head tmp = {
3022 		.b_size = i_blocksize(inode),
3023 	};
3024 
3025 	get_block(inode, block, &tmp, 0);
3026 	return tmp.b_blocknr;
3027 }
3028 EXPORT_SYMBOL(generic_block_bmap);
3029 
3030 static void end_bio_bh_io_sync(struct bio *bio)
3031 {
3032 	struct buffer_head *bh = bio->bi_private;
3033 
3034 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
3035 		set_bit(BH_Quiet, &bh->b_state);
3036 
3037 	bh->b_end_io(bh, !bio->bi_status);
3038 	bio_put(bio);
3039 }
3040 
3041 /*
3042  * This allows us to do IO even on the odd last sectors
3043  * of a device, even if the block size is some multiple
3044  * of the physical sector size.
3045  *
3046  * We'll just truncate the bio to the size of the device,
3047  * and clear the end of the buffer head manually.
3048  *
3049  * Truly out-of-range accesses will turn into actual IO
3050  * errors; this only handles the "we need to be able to
3051  * do IO at the final sector" case.
3052  */
3053 void guard_bio_eod(int op, struct bio *bio)
3054 {
3055 	sector_t maxsector;
3056 	struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
3057 	unsigned truncated_bytes;
3058 
3059 	maxsector = get_capacity(bio->bi_disk);
3060 	if (!maxsector)
3061 		return;
3062 
3063 	/*
3064 	 * If the *whole* IO is past the end of the device,
3065 	 * let it through, and the IO layer will turn it into
3066 	 * an EIO.
3067 	 */
3068 	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
3069 		return;
3070 
3071 	maxsector -= bio->bi_iter.bi_sector;
3072 	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
3073 		return;
3074 
3075 	/* Uhhuh. We've got a bio that straddles the device size! */
3076 	truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
3077 
3078 	/* Truncate the bio..
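 *
 * For example (illustrative numbers): a 4k buffer starting at sector
 * 8184 of an 8191-sector device leaves maxsector == 7 after the
 * subtraction above, so truncated_bytes == 4096 - (7 << 9) == 512 and
 * we trim one sector's worth off the request.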
 */
3079 	bio->bi_iter.bi_size -= truncated_bytes;
3080 	bvec->bv_len -= truncated_bytes;
3081 
3082 	/* ..and clear the end of the buffer for reads */
3083 	if (op == REQ_OP_READ) {
3084 		zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
3085 				truncated_bytes);
3086 	}
3087 }
3088 
3089 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
3090 			 enum rw_hint write_hint, struct writeback_control *wbc)
3091 {
3092 	struct bio *bio;
3093 
3094 	BUG_ON(!buffer_locked(bh));
3095 	BUG_ON(!buffer_mapped(bh));
3096 	BUG_ON(!bh->b_end_io);
3097 	BUG_ON(buffer_delay(bh));
3098 	BUG_ON(buffer_unwritten(bh));
3099 
3100 	/*
3101 	 * Only clear out a write error when rewriting
3102 	 */
3103 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
3104 		clear_buffer_write_io_error(bh);
3105 
3106 	/*
3107 	 * from here on down, it's all bio -- do the initial mapping,
3108 	 * submit_bio -> generic_make_request may further map this bio around
3109 	 */
3110 	bio = bio_alloc(GFP_NOIO, 1);
3111 
3112 	if (wbc) {
3113 		wbc_init_bio(wbc, bio);
3114 		wbc_account_io(wbc, bh->b_page, bh->b_size);
3115 	}
3116 
3117 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3118 	bio_set_dev(bio, bh->b_bdev);
3119 	bio->bi_write_hint = write_hint;
3120 
3121 	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
3122 	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
3123 
3124 	bio->bi_end_io = end_bio_bh_io_sync;
3125 	bio->bi_private = bh;
3126 
3127 	/* Take care of bh's that straddle the end of the device */
3128 	guard_bio_eod(op, bio);
3129 
3130 	if (buffer_meta(bh))
3131 		op_flags |= REQ_META;
3132 	if (buffer_prio(bh))
3133 		op_flags |= REQ_PRIO;
3134 	bio_set_op_attrs(bio, op, op_flags);
3135 
3136 	submit_bio(bio);
3137 	return 0;
3138 }
3139 
3140 int submit_bh(int op, int op_flags, struct buffer_head *bh)
3141 {
3142 	return submit_bh_wbc(op, op_flags, bh, 0, NULL);
3143 }
3144 EXPORT_SYMBOL(submit_bh);
3145 
3146 /**
3147  * ll_rw_block: low-level access to block devices (DEPRECATED)
3148  * @op: whether to %READ or %WRITE
3149  * @op_flags: req_flag_bits
3150  * @nr: number of &struct buffer_heads in the array
3151  * @bhs: array of pointers to &struct buffer_head
3152  *
3153  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3154  * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
3155  * @op_flags contains flags modifying the detailed I/O behavior, most notably
3156  * %REQ_RAHEAD.
3157  *
3158  * This function drops any buffer that it cannot get a lock on (with the
3159  * BH_Lock state bit), any buffer that appears to be clean when doing a write
3160  * request, and any buffer that appears to be up-to-date when doing a read
3161  * request. Further, it marks as clean the buffers that are processed for
3162  * writing (the buffer cache won't assume that they are actually clean
3163  * until the buffer gets unlocked).
3164  *
3165  * ll_rw_block sets b_end_io to a simple completion handler that marks
3166  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3167  * any waiters.
3168  *
3169  * All of the buffers must be for the same device, and must also be a
3170  * multiple of the current approved size for the device.
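 *
 * A typical read-and-wait sequence (compare block_truncate_page()
 * above, which uses exactly this pattern):
 *
 *	ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;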
3171 */ 3172 void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[]) 3173 { 3174 int i; 3175 3176 for (i = 0; i < nr; i++) { 3177 struct buffer_head *bh = bhs[i]; 3178 3179 if (!trylock_buffer(bh)) 3180 continue; 3181 if (op == WRITE) { 3182 if (test_clear_buffer_dirty(bh)) { 3183 bh->b_end_io = end_buffer_write_sync; 3184 get_bh(bh); 3185 submit_bh(op, op_flags, bh); 3186 continue; 3187 } 3188 } else { 3189 if (!buffer_uptodate(bh)) { 3190 bh->b_end_io = end_buffer_read_sync; 3191 get_bh(bh); 3192 submit_bh(op, op_flags, bh); 3193 continue; 3194 } 3195 } 3196 unlock_buffer(bh); 3197 } 3198 } 3199 EXPORT_SYMBOL(ll_rw_block); 3200 3201 void write_dirty_buffer(struct buffer_head *bh, int op_flags) 3202 { 3203 lock_buffer(bh); 3204 if (!test_clear_buffer_dirty(bh)) { 3205 unlock_buffer(bh); 3206 return; 3207 } 3208 bh->b_end_io = end_buffer_write_sync; 3209 get_bh(bh); 3210 submit_bh(REQ_OP_WRITE, op_flags, bh); 3211 } 3212 EXPORT_SYMBOL(write_dirty_buffer); 3213 3214 /* 3215 * For a data-integrity writeout, we need to wait upon any in-progress I/O 3216 * and then start new I/O and then wait upon it. The caller must have a ref on 3217 * the buffer_head. 3218 */ 3219 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) 3220 { 3221 int ret = 0; 3222 3223 WARN_ON(atomic_read(&bh->b_count) < 1); 3224 lock_buffer(bh); 3225 if (test_clear_buffer_dirty(bh)) { 3226 get_bh(bh); 3227 bh->b_end_io = end_buffer_write_sync; 3228 ret = submit_bh(REQ_OP_WRITE, op_flags, bh); 3229 wait_on_buffer(bh); 3230 if (!ret && !buffer_uptodate(bh)) 3231 ret = -EIO; 3232 } else { 3233 unlock_buffer(bh); 3234 } 3235 return ret; 3236 } 3237 EXPORT_SYMBOL(__sync_dirty_buffer); 3238 3239 int sync_dirty_buffer(struct buffer_head *bh) 3240 { 3241 return __sync_dirty_buffer(bh, REQ_SYNC); 3242 } 3243 EXPORT_SYMBOL(sync_dirty_buffer); 3244 3245 /* 3246 * try_to_free_buffers() checks if all the buffers on this particular page 3247 * are unused, and releases them if so. 3248 * 3249 * Exclusion against try_to_free_buffers may be obtained by either 3250 * locking the page or by holding its mapping's private_lock. 3251 * 3252 * If the page is dirty but all the buffers are clean then we need to 3253 * be sure to mark the page clean as well. This is because the page 3254 * may be against a block device, and a later reattachment of buffers 3255 * to a dirty page will set *all* buffers dirty. Which would corrupt 3256 * filesystem data on the same device. 3257 * 3258 * The same applies to regular filesystem pages: if all the buffers are 3259 * clean then we set the page clean and proceed. To do that, we require 3260 * total exclusion from __set_page_dirty_buffers(). That is obtained with 3261 * private_lock. 3262 * 3263 * try_to_free_buffers() is non-blocking. 
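 *
 * "Unused" means the buffer has a zero refcount and is neither dirty
 * nor locked - see buffer_busy() below.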
3264 */ 3265 static inline int buffer_busy(struct buffer_head *bh) 3266 { 3267 return atomic_read(&bh->b_count) | 3268 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 3269 } 3270 3271 static int 3272 drop_buffers(struct page *page, struct buffer_head **buffers_to_free) 3273 { 3274 struct buffer_head *head = page_buffers(page); 3275 struct buffer_head *bh; 3276 3277 bh = head; 3278 do { 3279 if (buffer_busy(bh)) 3280 goto failed; 3281 bh = bh->b_this_page; 3282 } while (bh != head); 3283 3284 do { 3285 struct buffer_head *next = bh->b_this_page; 3286 3287 if (bh->b_assoc_map) 3288 __remove_assoc_queue(bh); 3289 bh = next; 3290 } while (bh != head); 3291 *buffers_to_free = head; 3292 __clear_page_buffers(page); 3293 return 1; 3294 failed: 3295 return 0; 3296 } 3297 3298 int try_to_free_buffers(struct page *page) 3299 { 3300 struct address_space * const mapping = page->mapping; 3301 struct buffer_head *buffers_to_free = NULL; 3302 int ret = 0; 3303 3304 BUG_ON(!PageLocked(page)); 3305 if (PageWriteback(page)) 3306 return 0; 3307 3308 if (mapping == NULL) { /* can this still happen? */ 3309 ret = drop_buffers(page, &buffers_to_free); 3310 goto out; 3311 } 3312 3313 spin_lock(&mapping->private_lock); 3314 ret = drop_buffers(page, &buffers_to_free); 3315 3316 /* 3317 * If the filesystem writes its buffers by hand (eg ext3) 3318 * then we can have clean buffers against a dirty page. We 3319 * clean the page here; otherwise the VM will never notice 3320 * that the filesystem did any IO at all. 3321 * 3322 * Also, during truncate, discard_buffer will have marked all 3323 * the page's buffers clean. We discover that here and clean 3324 * the page also. 3325 * 3326 * private_lock must be held over this entire operation in order 3327 * to synchronise against __set_page_dirty_buffers and prevent the 3328 * dirty bit from being lost. 3329 */ 3330 if (ret) 3331 cancel_dirty_page(page); 3332 spin_unlock(&mapping->private_lock); 3333 out: 3334 if (buffers_to_free) { 3335 struct buffer_head *bh = buffers_to_free; 3336 3337 do { 3338 struct buffer_head *next = bh->b_this_page; 3339 free_buffer_head(bh); 3340 bh = next; 3341 } while (bh != buffers_to_free); 3342 } 3343 return ret; 3344 } 3345 EXPORT_SYMBOL(try_to_free_buffers); 3346 3347 /* 3348 * There are no bdflush tunables left. But distributions are 3349 * still running obsolete flush daemons, so we terminate them here. 3350 * 3351 * Use of bdflush() is deprecated and will be removed in a future kernel. 3352 * The `flush-X' kernel threads fully replace bdflush daemons and this call. 3353 */ 3354 SYSCALL_DEFINE2(bdflush, int, func, long, data) 3355 { 3356 static int msg_count; 3357 3358 if (!capable(CAP_SYS_ADMIN)) 3359 return -EPERM; 3360 3361 if (msg_count < 5) { 3362 msg_count++; 3363 printk(KERN_INFO 3364 "warning: process `%s' used the obsolete bdflush" 3365 " system call\n", current->comm); 3366 printk(KERN_INFO "Fix your initscripts?\n"); 3367 } 3368 3369 if (func == 1) 3370 do_exit(0); 3371 return 0; 3372 } 3373 3374 /* 3375 * Buffer-head allocation 3376 */ 3377 static struct kmem_cache *bh_cachep __read_mostly; 3378 3379 /* 3380 * Once the number of bh's in the machine exceeds this level, we start 3381 * stripping them in writeback. 
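 *
 * The level itself is computed in buffer_init() below as roughly 10%
 * of ZONE_NORMAL; buffer_heads_over_limit is the cheap flag that the
 * writeback path actually tests.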
3382 */ 3383 static unsigned long max_buffer_heads; 3384 3385 int buffer_heads_over_limit; 3386 3387 struct bh_accounting { 3388 int nr; /* Number of live bh's */ 3389 int ratelimit; /* Limit cacheline bouncing */ 3390 }; 3391 3392 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3393 3394 static void recalc_bh_state(void) 3395 { 3396 int i; 3397 int tot = 0; 3398 3399 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 3400 return; 3401 __this_cpu_write(bh_accounting.ratelimit, 0); 3402 for_each_online_cpu(i) 3403 tot += per_cpu(bh_accounting, i).nr; 3404 buffer_heads_over_limit = (tot > max_buffer_heads); 3405 } 3406 3407 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3408 { 3409 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 3410 if (ret) { 3411 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3412 preempt_disable(); 3413 __this_cpu_inc(bh_accounting.nr); 3414 recalc_bh_state(); 3415 preempt_enable(); 3416 } 3417 return ret; 3418 } 3419 EXPORT_SYMBOL(alloc_buffer_head); 3420 3421 void free_buffer_head(struct buffer_head *bh) 3422 { 3423 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3424 kmem_cache_free(bh_cachep, bh); 3425 preempt_disable(); 3426 __this_cpu_dec(bh_accounting.nr); 3427 recalc_bh_state(); 3428 preempt_enable(); 3429 } 3430 EXPORT_SYMBOL(free_buffer_head); 3431 3432 static int buffer_exit_cpu_dead(unsigned int cpu) 3433 { 3434 int i; 3435 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3436 3437 for (i = 0; i < BH_LRU_SIZE; i++) { 3438 brelse(b->bhs[i]); 3439 b->bhs[i] = NULL; 3440 } 3441 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 3442 per_cpu(bh_accounting, cpu).nr = 0; 3443 return 0; 3444 } 3445 3446 /** 3447 * bh_uptodate_or_lock - Test whether the buffer is uptodate 3448 * @bh: struct buffer_head 3449 * 3450 * Return true if the buffer is up-to-date and false, 3451 * with the buffer locked, if not. 3452 */ 3453 int bh_uptodate_or_lock(struct buffer_head *bh) 3454 { 3455 if (!buffer_uptodate(bh)) { 3456 lock_buffer(bh); 3457 if (!buffer_uptodate(bh)) 3458 return 0; 3459 unlock_buffer(bh); 3460 } 3461 return 1; 3462 } 3463 EXPORT_SYMBOL(bh_uptodate_or_lock); 3464 3465 /** 3466 * bh_submit_read - Submit a locked buffer for reading 3467 * @bh: struct buffer_head 3468 * 3469 * Returns zero on success and -EIO on error. 3470 */ 3471 int bh_submit_read(struct buffer_head *bh) 3472 { 3473 BUG_ON(!buffer_locked(bh)); 3474 3475 if (buffer_uptodate(bh)) { 3476 unlock_buffer(bh); 3477 return 0; 3478 } 3479 3480 get_bh(bh); 3481 bh->b_end_io = end_buffer_read_sync; 3482 submit_bh(REQ_OP_READ, 0, bh); 3483 wait_on_buffer(bh); 3484 if (buffer_uptodate(bh)) 3485 return 0; 3486 return -EIO; 3487 } 3488 EXPORT_SYMBOL(bh_submit_read); 3489 3490 /* 3491 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff. 3492 * 3493 * Returns the offset within the file on success, and -ENOENT otherwise. 3494 */ 3495 static loff_t 3496 page_seek_hole_data(struct page *page, loff_t lastoff, int whence) 3497 { 3498 loff_t offset = page_offset(page); 3499 struct buffer_head *bh, *head; 3500 bool seek_data = whence == SEEK_DATA; 3501 3502 if (lastoff < offset) 3503 lastoff = offset; 3504 3505 bh = head = page_buffers(page); 3506 do { 3507 offset += bh->b_size; 3508 if (lastoff >= offset) 3509 continue; 3510 3511 /* 3512 * Unwritten extents that have data in the page cache covering 3513 * them can be identified by the BH_Unwritten state flag. 
3514  * Pages with multiple buffers might have a mix of holes, data
3515  * and unwritten extents - any buffer with valid data in it
3516  * should have BH_Uptodate flag set on it.
3517  */
3518 
3519 		if ((buffer_unwritten(bh) || buffer_uptodate(bh)) == seek_data)
3520 			return lastoff;
3521 
3522 		lastoff = offset;
3523 	} while ((bh = bh->b_this_page) != head);
3524 	return -ENOENT;
3525 }
3526 
3527 /*
3528  * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
3529  *
3530  * Within unwritten extents, the page cache determines which parts are holes
3531  * and which are data: unwritten and uptodate buffer heads count as data;
3532  * everything else counts as a hole.
3533  *
3534  * Returns the resulting offset on success, and -ENOENT otherwise.
3535  */
3536 loff_t
3537 page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
3538 			  int whence)
3539 {
3540 	pgoff_t index = offset >> PAGE_SHIFT;
3541 	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
3542 	loff_t lastoff = offset;
3543 	struct pagevec pvec;
3544 
3545 	if (length <= 0)
3546 		return -ENOENT;
3547 
3548 	pagevec_init(&pvec, 0);
3549 
3550 	do {
3551 		unsigned nr_pages, i;
3552 
3553 		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
3554 						end - 1);
3555 		if (nr_pages == 0)
3556 			break;
3557 
3558 		for (i = 0; i < nr_pages; i++) {
3559 			struct page *page = pvec.pages[i];
3560 
3561 			/*
3562 			 * At this point, the page may be truncated or
3563 			 * invalidated (changing page->mapping to NULL), or
3564 			 * even swizzled back from swapper_space to tmpfs file
3565 			 * mapping. However, page->index will not change
3566 			 * because we have a reference on the page.
3567 			 *
3568 			 * If current page offset is beyond where we've ended,
3569 			 * we've found a hole.
3570 			 */
3571 			if (whence == SEEK_HOLE &&
3572 			    lastoff < page_offset(page))
3573 				goto check_range;
3574 
3575 			lock_page(page);
3576 			if (likely(page->mapping == inode->i_mapping) &&
3577 			    page_has_buffers(page)) {
3578 				lastoff = page_seek_hole_data(page, lastoff, whence);
3579 				if (lastoff >= 0) {
3580 					unlock_page(page);
3581 					goto check_range;
3582 				}
3583 			}
3584 			unlock_page(page);
3585 			lastoff = page_offset(page) + PAGE_SIZE;
3586 		}
3587 		pagevec_release(&pvec);
3588 	} while (index < end);
3589 
3590 	/* If there is no page at lastoff and we are not done, we found a hole. */
3591 	if (whence != SEEK_HOLE)
3592 		goto not_found;
3593 
3594 check_range:
3595 	if (lastoff < offset + length)
3596 		goto out;
3597 not_found:
3598 	lastoff = -ENOENT;
3599 out:
3600 	pagevec_release(&pvec);
3601 	return lastoff;
3602 }
3603 
3604 void __init buffer_init(void)
3605 {
3606 	unsigned long nrpages;
3607 	int ret;
3608 
3609 	bh_cachep = kmem_cache_create("buffer_head",
3610 			sizeof(struct buffer_head), 0,
3611 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3612 				SLAB_MEM_SPREAD),
3613 				NULL);
3614 
3615 	/*
3616 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3617 	 */
3618 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3619 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3620 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3621 					NULL, buffer_exit_cpu_dead);
3622 	WARN_ON(ret < 0);
3623 }
3624 