xref: /openbmc/linux/fs/buffer.c (revision 600a711c)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/export.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 EXPORT_SYMBOL(init_buffer);
56 
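/*
 * wait_on_bit() action used by __lock_buffer() and __wait_on_buffer():
 * sleep in io_schedule() until the waker (unlock_buffer()) wakes us.
 */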
57 static int sleep_on_buffer(void *word)
58 {
59 	io_schedule();
60 	return 0;
61 }
62 
63 void __lock_buffer(struct buffer_head *bh)
64 {
65 	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
66 							TASK_UNINTERRUPTIBLE);
67 }
68 EXPORT_SYMBOL(__lock_buffer);
69 
70 void unlock_buffer(struct buffer_head *bh)
71 {
72 	clear_bit_unlock(BH_Lock, &bh->b_state);
73 	smp_mb__after_clear_bit();
74 	wake_up_bit(&bh->b_state, BH_Lock);
75 }
76 EXPORT_SYMBOL(unlock_buffer);
77 
78 /*
79  * Block until a buffer comes unlocked.  This doesn't stop it
80  * from becoming locked again - you have to lock it yourself
81  * if you want to preserve its state.
82  */
83 void __wait_on_buffer(struct buffer_head * bh)
84 {
85 	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
86 }
87 EXPORT_SYMBOL(__wait_on_buffer);
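
/*
 * Illustrative sketch (editor's example): because the buffer can be locked
 * again the instant __wait_on_buffer() returns, a caller that needs a stable
 * view of the buffer must take the lock itself:
 *
 *	lock_buffer(bh);
 *	... inspect or modify bh->b_data / bh->b_state under the lock ...
 *	unlock_buffer(bh);
 *
 * Plain wait_on_buffer(bh) is enough only when the caller merely waits for
 * previously submitted I/O to finish, as __bread_slow() does below.
 */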
88 
89 static void
90 __clear_page_buffers(struct page *page)
91 {
92 	ClearPagePrivate(page);
93 	set_page_private(page, 0);
94 	page_cache_release(page);
95 }
96 
97 
98 static int quiet_error(struct buffer_head *bh)
99 {
100 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
101 		return 0;
102 	return 1;
103 }
104 
105 
106 static void buffer_io_error(struct buffer_head *bh)
107 {
108 	char b[BDEVNAME_SIZE];
109 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
110 			bdevname(bh->b_bdev, b),
111 			(unsigned long long)bh->b_blocknr);
112 }
113 
114 /*
115  * End-of-IO handler helper function which does not touch the bh after
116  * unlocking it.
117  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
118  * a race there is benign: unlock_buffer() only uses the bh's address for
119  * hashing after unlocking the buffer, so it doesn't actually touch the bh
120  * itself.
121  */
122 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
123 {
124 	if (uptodate) {
125 		set_buffer_uptodate(bh);
126 	} else {
127 		/* This happens, due to failed READA attempts. */
128 		clear_buffer_uptodate(bh);
129 	}
130 	unlock_buffer(bh);
131 }
132 
133 /*
134  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
135  * unlock the buffer. This is what ll_rw_block uses too.
136  */
137 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
138 {
139 	__end_buffer_read_notouch(bh, uptodate);
140 	put_bh(bh);
141 }
142 EXPORT_SYMBOL(end_buffer_read_sync);
143 
144 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
145 {
146 	char b[BDEVNAME_SIZE];
147 
148 	if (uptodate) {
149 		set_buffer_uptodate(bh);
150 	} else {
151 		if (!quiet_error(bh)) {
152 			buffer_io_error(bh);
153 			printk(KERN_WARNING "lost page write due to "
154 					"I/O error on %s\n",
155 				       bdevname(bh->b_bdev, b));
156 		}
157 		set_buffer_write_io_error(bh);
158 		clear_buffer_uptodate(bh);
159 	}
160 	unlock_buffer(bh);
161 	put_bh(bh);
162 }
163 EXPORT_SYMBOL(end_buffer_write_sync);
164 
165 /*
166  * Various filesystems appear to want __find_get_block to be non-blocking.
167  * But it's the page lock which protects the buffers.  To get around this,
168  * we get exclusion from try_to_free_buffers with the blockdev mapping's
169  * private_lock.
170  *
171  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
172  * may be quite high.  This code could TryLock the page, and if that
173  * succeeds, there is no need to take private_lock. (But if
174  * private_lock is contended then so is mapping->tree_lock).
175  */
176 static struct buffer_head *
177 __find_get_block_slow(struct block_device *bdev, sector_t block)
178 {
179 	struct inode *bd_inode = bdev->bd_inode;
180 	struct address_space *bd_mapping = bd_inode->i_mapping;
181 	struct buffer_head *ret = NULL;
182 	pgoff_t index;
183 	struct buffer_head *bh;
184 	struct buffer_head *head;
185 	struct page *page;
186 	int all_mapped = 1;
187 
188 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
189 	page = find_get_page(bd_mapping, index);
190 	if (!page)
191 		goto out;
192 
193 	spin_lock(&bd_mapping->private_lock);
194 	if (!page_has_buffers(page))
195 		goto out_unlock;
196 	head = page_buffers(page);
197 	bh = head;
198 	do {
199 		if (!buffer_mapped(bh))
200 			all_mapped = 0;
201 		else if (bh->b_blocknr == block) {
202 			ret = bh;
203 			get_bh(bh);
204 			goto out_unlock;
205 		}
206 		bh = bh->b_this_page;
207 	} while (bh != head);
208 
209 	/* we might be here because some of the buffers on this page are
210 	 * not mapped.  This is due to various races between
211 	 * file io on the block device and getblk.  It gets dealt with
212 	 * elsewhere, don't buffer_error if we had some unmapped buffers
213 	 */
214 	if (all_mapped) {
215 		char b[BDEVNAME_SIZE];
216 
217 		printk("__find_get_block_slow() failed. "
218 			"block=%llu, b_blocknr=%llu\n",
219 			(unsigned long long)block,
220 			(unsigned long long)bh->b_blocknr);
221 		printk("b_state=0x%08lx, b_size=%zu\n",
222 			bh->b_state, bh->b_size);
223 		printk("device %s blocksize: %d\n", bdevname(bdev, b),
224 			1 << bd_inode->i_blkbits);
225 	}
226 out_unlock:
227 	spin_unlock(&bd_mapping->private_lock);
228 	page_cache_release(page);
229 out:
230 	return ret;
231 }
232 
233 /*
234  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
235  */
236 static void free_more_memory(void)
237 {
238 	struct zone *zone;
239 	int nid;
240 
241 	wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
242 	yield();
243 
244 	for_each_online_node(nid) {
245 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
246 						gfp_zone(GFP_NOFS), NULL,
247 						&zone);
248 		if (zone)
249 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
250 						GFP_NOFS, NULL);
251 	}
252 }
253 
254 /*
255  * I/O completion handler for block_read_full_page() - pages
256  * which come unlocked at the end of I/O.
257  */
258 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
259 {
260 	unsigned long flags;
261 	struct buffer_head *first;
262 	struct buffer_head *tmp;
263 	struct page *page;
264 	int page_uptodate = 1;
265 
266 	BUG_ON(!buffer_async_read(bh));
267 
268 	page = bh->b_page;
269 	if (uptodate) {
270 		set_buffer_uptodate(bh);
271 	} else {
272 		clear_buffer_uptodate(bh);
273 		if (!quiet_error(bh))
274 			buffer_io_error(bh);
275 		SetPageError(page);
276 	}
277 
278 	/*
279 	 * Be _very_ careful from here on. Bad things can happen if
280 	 * two buffer heads end IO at almost the same time and both
281 	 * decide that the page is now completely done.
282 	 */
283 	first = page_buffers(page);
284 	local_irq_save(flags);
285 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
286 	clear_buffer_async_read(bh);
287 	unlock_buffer(bh);
288 	tmp = bh;
289 	do {
290 		if (!buffer_uptodate(tmp))
291 			page_uptodate = 0;
292 		if (buffer_async_read(tmp)) {
293 			BUG_ON(!buffer_locked(tmp));
294 			goto still_busy;
295 		}
296 		tmp = tmp->b_this_page;
297 	} while (tmp != bh);
298 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
299 	local_irq_restore(flags);
300 
301 	/*
302 	 * If none of the buffers had errors and they are all
303 	 * uptodate then we can set the page uptodate.
304 	 */
305 	if (page_uptodate && !PageError(page))
306 		SetPageUptodate(page);
307 	unlock_page(page);
308 	return;
309 
310 still_busy:
311 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
312 	local_irq_restore(flags);
313 	return;
314 }
315 
316 /*
317  * Completion handler for block_write_full_page() - pages which are unlocked
318  * during I/O, and which have PageWriteback cleared upon I/O completion.
319  */
320 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
321 {
322 	char b[BDEVNAME_SIZE];
323 	unsigned long flags;
324 	struct buffer_head *first;
325 	struct buffer_head *tmp;
326 	struct page *page;
327 
328 	BUG_ON(!buffer_async_write(bh));
329 
330 	page = bh->b_page;
331 	if (uptodate) {
332 		set_buffer_uptodate(bh);
333 	} else {
334 		if (!quiet_error(bh)) {
335 			buffer_io_error(bh);
336 			printk(KERN_WARNING "lost page write due to "
337 					"I/O error on %s\n",
338 			       bdevname(bh->b_bdev, b));
339 		}
340 		set_bit(AS_EIO, &page->mapping->flags);
341 		set_buffer_write_io_error(bh);
342 		clear_buffer_uptodate(bh);
343 		SetPageError(page);
344 	}
345 
346 	first = page_buffers(page);
347 	local_irq_save(flags);
348 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
349 
350 	clear_buffer_async_write(bh);
351 	unlock_buffer(bh);
352 	tmp = bh->b_this_page;
353 	while (tmp != bh) {
354 		if (buffer_async_write(tmp)) {
355 			BUG_ON(!buffer_locked(tmp));
356 			goto still_busy;
357 		}
358 		tmp = tmp->b_this_page;
359 	}
360 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
361 	local_irq_restore(flags);
362 	end_page_writeback(page);
363 	return;
364 
365 still_busy:
366 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
367 	local_irq_restore(flags);
368 	return;
369 }
370 EXPORT_SYMBOL(end_buffer_async_write);
371 
372 /*
373  * If a page's buffers are under async read-in (end_buffer_async_read
374  * completion) then there is a possibility that another thread of
375  * control could lock one of the buffers after it has completed
376  * but while some of the other buffers have not completed.  This
377  * locked buffer would confuse end_buffer_async_read() into not unlocking
378  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
379  * that this buffer is not under async I/O.
380  *
381  * The page comes unlocked when it has no locked buffer_async buffers
382  * left.
383  *
384  * PageLocked prevents anyone from starting new async read I/O against any of
385  * the buffers.
386  *
387  * PageWriteback is used to prevent simultaneous writeout of the same
388  * page.
389  *
390  * PageLocked prevents anyone from starting writeback of a page which is
391  * under read I/O (PageWriteback is only ever set against a locked page).
392  */
393 static void mark_buffer_async_read(struct buffer_head *bh)
394 {
395 	bh->b_end_io = end_buffer_async_read;
396 	set_buffer_async_read(bh);
397 }
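
/*
 * Illustrative sketch (editor's example): block_read_full_page(), later in
 * this file, applies the above to every buffer that still needs reading,
 * roughly:
 *
 *	lock_buffer(bh);
 *	mark_buffer_async_read(bh);
 *	...
 *	submit_bh(READ, bh);
 *
 * so that end_buffer_async_read() can detect, per buffer, when the last
 * outstanding read for the page completes and unlock the page.
 */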
398 
399 static void mark_buffer_async_write_endio(struct buffer_head *bh,
400 					  bh_end_io_t *handler)
401 {
402 	bh->b_end_io = handler;
403 	set_buffer_async_write(bh);
404 }
405 
406 void mark_buffer_async_write(struct buffer_head *bh)
407 {
408 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
409 }
410 EXPORT_SYMBOL(mark_buffer_async_write);
411 
412 
413 /*
414  * fs/buffer.c contains helper functions for buffer-backed address space's
415  * fsync functions.  A common requirement for buffer-based filesystems is
416  * that certain data from the backing blockdev needs to be written out for
417  * a successful fsync().  For example, ext2 indirect blocks need to be
418  * written back and waited upon before fsync() returns.
419  *
420  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
421  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
422  * management of a list of dependent buffers at ->i_mapping->private_list.
423  *
424  * Locking is a little subtle: try_to_free_buffers() will remove buffers
425  * from their controlling inode's queue when they are being freed.  But
426  * try_to_free_buffers() will be operating against the *blockdev* mapping
427  * at the time, not against the S_ISREG file which depends on those buffers.
428  * So the locking for private_list is via the private_lock in the address_space
429  * which backs the buffers.  Which is different from the address_space
430  * against which the buffers are listed.  So for a particular address_space,
431  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
432  * mapping->private_list will always be protected by the backing blockdev's
433  * ->private_lock.
434  *
435  * Which introduces a requirement: all buffers on an address_space's
436  * ->private_list must be from the same address_space: the blockdev's.
437  *
438  * address_spaces which do not place buffers at ->private_list via these
439  * utility functions are free to use private_lock and private_list for
440  * whatever they want.  The only requirement is that list_empty(private_list)
441  * be true at clear_inode() time.
442  *
443  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
444  * filesystems should do that.  invalidate_inode_buffers() should just go
445  * BUG_ON(!list_empty).
446  *
447  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
448  * take an address_space, not an inode.  And it should be called
449  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
450  * queued up.
451  *
452  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
453  * list if it is already on a list.  Because if the buffer is on a list,
454  * it *must* already be on the right one.  If not, the filesystem is being
455  * silly.  This will save a ton of locking.  But first we have to ensure
456  * that buffers are taken *off* the old inode's list when they are freed
457  * (presumably in truncate).  That requires careful auditing of all
458  * filesystems (do it inside bforget()).  It could also be done by bringing
459  * b_inode back.
460  */
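
/*
 * Illustrative sketch (editor's example, not part of the description above):
 * a simple filesystem that relies on this machinery would dirty its dependent
 * metadata with mark_buffer_dirty_inode(), assuming a hypothetical helper
 * foo_read_indirect() that returns the relevant buffer_head:
 *
 *	bh = foo_read_indirect(inode);
 *	... update the indirect block through bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);	(queues bh on ->private_list)
 *	brelse(bh);
 *
 * and then, in its ->fsync(), write out and wait upon everything so queued:
 *
 *	return sync_mapping_buffers(inode->i_mapping);
 */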
461 
462 /*
463  * The buffer's backing address_space's private_lock must be held
464  */
465 static void __remove_assoc_queue(struct buffer_head *bh)
466 {
467 	list_del_init(&bh->b_assoc_buffers);
468 	WARN_ON(!bh->b_assoc_map);
469 	if (buffer_write_io_error(bh))
470 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
471 	bh->b_assoc_map = NULL;
472 }
473 
474 int inode_has_buffers(struct inode *inode)
475 {
476 	return !list_empty(&inode->i_data.private_list);
477 }
478 
479 /*
480  * osync is designed to support O_SYNC io.  It waits synchronously for
481  * all already-submitted IO to complete, but does not queue any new
482  * writes to the disk.
483  *
484  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
485  * you dirty the buffers, and then use osync_inode_buffers to wait for
486  * completion.  Any other dirty buffers which are not yet queued for
487  * write will not be flushed to disk by the osync.
488  */
489 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
490 {
491 	struct buffer_head *bh;
492 	struct list_head *p;
493 	int err = 0;
494 
495 	spin_lock(lock);
496 repeat:
497 	list_for_each_prev(p, list) {
498 		bh = BH_ENTRY(p);
499 		if (buffer_locked(bh)) {
500 			get_bh(bh);
501 			spin_unlock(lock);
502 			wait_on_buffer(bh);
503 			if (!buffer_uptodate(bh))
504 				err = -EIO;
505 			brelse(bh);
506 			spin_lock(lock);
507 			goto repeat;
508 		}
509 	}
510 	spin_unlock(lock);
511 	return err;
512 }
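
/*
 * Illustrative sketch (editor's example): for a single buffer, the O_SYNC
 * pattern described above amounts to:
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);	(start the write immediately)
 *	...
 *	wait_on_buffer(bh);		(later: wait for it to reach disk)
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 *
 * osync_buffers_list() is simply that waiting step applied to every buffer
 * still locked on an inode's ->private_list.
 */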
513 
514 static void do_thaw_one(struct super_block *sb, void *unused)
515 {
516 	char b[BDEVNAME_SIZE];
517 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
518 		printk(KERN_WARNING "Emergency Thaw on %s\n",
519 		       bdevname(sb->s_bdev, b));
520 }
521 
522 static void do_thaw_all(struct work_struct *work)
523 {
524 	iterate_supers(do_thaw_one, NULL);
525 	kfree(work);
526 	printk(KERN_WARNING "Emergency Thaw complete\n");
527 }
528 
529 /**
530  * emergency_thaw_all -- forcibly thaw every frozen filesystem
531  *
532  * Used for emergency unfreeze of all filesystems via SysRq
533  */
534 void emergency_thaw_all(void)
535 {
536 	struct work_struct *work;
537 
538 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
539 	if (work) {
540 		INIT_WORK(work, do_thaw_all);
541 		schedule_work(work);
542 	}
543 }
544 
545 /**
546  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
547  * @mapping: the mapping which wants those buffers written
548  *
549  * Starts I/O against the buffers at mapping->private_list, and waits upon
550  * that I/O.
551  *
552  * Basically, this is a convenience function for fsync().
553  * @mapping is a file or directory which needs those buffers to be written for
554  * a successful fsync().
555  */
556 int sync_mapping_buffers(struct address_space *mapping)
557 {
558 	struct address_space *buffer_mapping = mapping->assoc_mapping;
559 
560 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
561 		return 0;
562 
563 	return fsync_buffers_list(&buffer_mapping->private_lock,
564 					&mapping->private_list);
565 }
566 EXPORT_SYMBOL(sync_mapping_buffers);
567 
568 /*
569  * Called when we've recently written block `bblock', and it is known that
570  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
571  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
572  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
573  */
574 void write_boundary_block(struct block_device *bdev,
575 			sector_t bblock, unsigned blocksize)
576 {
577 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
578 	if (bh) {
579 		if (buffer_dirty(bh))
580 			ll_rw_block(WRITE, 1, &bh);
581 		put_bh(bh);
582 	}
583 }
584 
585 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
586 {
587 	struct address_space *mapping = inode->i_mapping;
588 	struct address_space *buffer_mapping = bh->b_page->mapping;
589 
590 	mark_buffer_dirty(bh);
591 	if (!mapping->assoc_mapping) {
592 		mapping->assoc_mapping = buffer_mapping;
593 	} else {
594 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
595 	}
596 	if (!bh->b_assoc_map) {
597 		spin_lock(&buffer_mapping->private_lock);
598 		list_move_tail(&bh->b_assoc_buffers,
599 				&mapping->private_list);
600 		bh->b_assoc_map = mapping;
601 		spin_unlock(&buffer_mapping->private_lock);
602 	}
603 }
604 EXPORT_SYMBOL(mark_buffer_dirty_inode);
605 
606 /*
607  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
608  * dirty.
609  *
610  * If warn is true, then emit a warning if the page is not uptodate and has
611  * not been truncated.
612  */
613 static void __set_page_dirty(struct page *page,
614 		struct address_space *mapping, int warn)
615 {
616 	spin_lock_irq(&mapping->tree_lock);
617 	if (page->mapping) {	/* Race with truncate? */
618 		WARN_ON_ONCE(warn && !PageUptodate(page));
619 		account_page_dirtied(page, mapping);
620 		radix_tree_tag_set(&mapping->page_tree,
621 				page_index(page), PAGECACHE_TAG_DIRTY);
622 	}
623 	spin_unlock_irq(&mapping->tree_lock);
624 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
625 }
626 
627 /*
628  * Add a page to the dirty page list.
629  *
630  * It is a sad fact of life that this function is called from several places
631  * deeply under spinlocking.  It may not sleep.
632  *
633  * If the page has buffers, the uptodate buffers are set dirty, to preserve
634  * dirty-state coherency between the page and the buffers.  If the page does
635  * not have buffers then when they are later attached they will all be set
636  * dirty.
637  *
638  * The buffers are dirtied before the page is dirtied.  There's a small race
639  * window in which a writepage caller may see the page cleanness but not the
640  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
641  * before the buffers, a concurrent writepage caller could clear the page dirty
642  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
643  * page on the dirty page list.
644  *
645  * We use private_lock to lock against try_to_free_buffers while using the
646  * page's buffer list.  Also use this to protect against clean buffers being
647  * added to the page after it was set dirty.
648  *
649  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
650  * address_space though.
651  */
652 int __set_page_dirty_buffers(struct page *page)
653 {
654 	int newly_dirty;
655 	struct address_space *mapping = page_mapping(page);
656 
657 	if (unlikely(!mapping))
658 		return !TestSetPageDirty(page);
659 
660 	spin_lock(&mapping->private_lock);
661 	if (page_has_buffers(page)) {
662 		struct buffer_head *head = page_buffers(page);
663 		struct buffer_head *bh = head;
664 
665 		do {
666 			set_buffer_dirty(bh);
667 			bh = bh->b_this_page;
668 		} while (bh != head);
669 	}
670 	newly_dirty = !TestSetPageDirty(page);
671 	spin_unlock(&mapping->private_lock);
672 
673 	if (newly_dirty)
674 		__set_page_dirty(page, mapping, 1);
675 	return newly_dirty;
676 }
677 EXPORT_SYMBOL(__set_page_dirty_buffers);
678 
679 /*
680  * Write out and wait upon a list of buffers.
681  *
682  * We have conflicting pressures: we want to make sure that all
683  * initially dirty buffers get waited on, but that any subsequently
684  * dirtied buffers don't.  After all, we don't want fsync to last
685  * forever if somebody is actively writing to the file.
686  *
687  * Do this in two main stages: first we copy dirty buffers to a
688  * temporary inode list, queueing the writes as we go.  Then we clean
689  * up, waiting for those writes to complete.
690  *
691  * During this second stage, any subsequent updates to the file may end
692  * up refiling the buffer on the original inode's dirty list again, so
693  * there is a chance we will end up with a buffer queued for write but
694  * not yet completed on that list.  So, as a final cleanup we go through
695  * the osync code to catch these locked, dirty buffers without requeuing
696  * any newly dirty buffers for write.
697  */
698 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
699 {
700 	struct buffer_head *bh;
701 	struct list_head tmp;
702 	struct address_space *mapping;
703 	int err = 0, err2;
704 	struct blk_plug plug;
705 
706 	INIT_LIST_HEAD(&tmp);
707 	blk_start_plug(&plug);
708 
709 	spin_lock(lock);
710 	while (!list_empty(list)) {
711 		bh = BH_ENTRY(list->next);
712 		mapping = bh->b_assoc_map;
713 		__remove_assoc_queue(bh);
714 		/* Avoid race with mark_buffer_dirty_inode() which does
715 		 * a lockless check and we rely on seeing the dirty bit */
716 		smp_mb();
717 		if (buffer_dirty(bh) || buffer_locked(bh)) {
718 			list_add(&bh->b_assoc_buffers, &tmp);
719 			bh->b_assoc_map = mapping;
720 			if (buffer_dirty(bh)) {
721 				get_bh(bh);
722 				spin_unlock(lock);
723 				/*
724 				 * Ensure any pending I/O completes so that
725 				 * write_dirty_buffer() actually writes the
726 				 * current contents - it is a noop if I/O is
727 				 * still in flight on potentially older
728 				 * contents.
729 				 */
730 				write_dirty_buffer(bh, WRITE_SYNC);
731 
732 				/*
733 				 * Kick off IO for the previous mapping. Note
734 				 * that we will not run the very last mapping,
735 				 * wait_on_buffer() will do that for us
736 				 * through sync_buffer().
737 				 */
738 				brelse(bh);
739 				spin_lock(lock);
740 			}
741 		}
742 	}
743 
744 	spin_unlock(lock);
745 	blk_finish_plug(&plug);
746 	spin_lock(lock);
747 
748 	while (!list_empty(&tmp)) {
749 		bh = BH_ENTRY(tmp.prev);
750 		get_bh(bh);
751 		mapping = bh->b_assoc_map;
752 		__remove_assoc_queue(bh);
753 		/* Avoid race with mark_buffer_dirty_inode() which does
754 		 * a lockless check and we rely on seeing the dirty bit */
755 		smp_mb();
756 		if (buffer_dirty(bh)) {
757 			list_add(&bh->b_assoc_buffers,
758 				 &mapping->private_list);
759 			bh->b_assoc_map = mapping;
760 		}
761 		spin_unlock(lock);
762 		wait_on_buffer(bh);
763 		if (!buffer_uptodate(bh))
764 			err = -EIO;
765 		brelse(bh);
766 		spin_lock(lock);
767 	}
768 
769 	spin_unlock(lock);
770 	err2 = osync_buffers_list(lock, list);
771 	if (err)
772 		return err;
773 	else
774 		return err2;
775 }
776 
777 /*
778  * Invalidate any and all dirty buffers on a given inode.  We are
779  * probably unmounting the fs, but that doesn't mean we have already
780  * done a sync().  Just drop the buffers from the inode list.
781  *
782  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
783  * assumes that all the buffers are against the blockdev.  Not true
784  * for reiserfs.
785  */
786 void invalidate_inode_buffers(struct inode *inode)
787 {
788 	if (inode_has_buffers(inode)) {
789 		struct address_space *mapping = &inode->i_data;
790 		struct list_head *list = &mapping->private_list;
791 		struct address_space *buffer_mapping = mapping->assoc_mapping;
792 
793 		spin_lock(&buffer_mapping->private_lock);
794 		while (!list_empty(list))
795 			__remove_assoc_queue(BH_ENTRY(list->next));
796 		spin_unlock(&buffer_mapping->private_lock);
797 	}
798 }
799 EXPORT_SYMBOL(invalidate_inode_buffers);
800 
801 /*
802  * Remove any clean buffers from the inode's buffer list.  This is called
803  * when we're trying to free the inode itself.  Those buffers can pin it.
804  *
805  * Returns true if all buffers were removed.
806  */
807 int remove_inode_buffers(struct inode *inode)
808 {
809 	int ret = 1;
810 
811 	if (inode_has_buffers(inode)) {
812 		struct address_space *mapping = &inode->i_data;
813 		struct list_head *list = &mapping->private_list;
814 		struct address_space *buffer_mapping = mapping->assoc_mapping;
815 
816 		spin_lock(&buffer_mapping->private_lock);
817 		while (!list_empty(list)) {
818 			struct buffer_head *bh = BH_ENTRY(list->next);
819 			if (buffer_dirty(bh)) {
820 				ret = 0;
821 				break;
822 			}
823 			__remove_assoc_queue(bh);
824 		}
825 		spin_unlock(&buffer_mapping->private_lock);
826 	}
827 	return ret;
828 }
829 
830 /*
831  * Create the appropriate buffers when given a page for the data area and
832  * the size of each buffer.  Use the bh->b_this_page linked list to
833  * follow the buffers created.  Return NULL if unable to create more
834  * buffers.
835  *
836  * The retry flag is used to differentiate async IO (paging, swapping),
837  * which may not fail, from ordinary buffer allocations.
838  */
839 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
840 		int retry)
841 {
842 	struct buffer_head *bh, *head;
843 	long offset;
844 
845 try_again:
846 	head = NULL;
847 	offset = PAGE_SIZE;
848 	while ((offset -= size) >= 0) {
849 		bh = alloc_buffer_head(GFP_NOFS);
850 		if (!bh)
851 			goto no_grow;
852 
853 		bh->b_bdev = NULL;
854 		bh->b_this_page = head;
855 		bh->b_blocknr = -1;
856 		head = bh;
857 
858 		bh->b_state = 0;
859 		atomic_set(&bh->b_count, 0);
860 		bh->b_size = size;
861 
862 		/* Link the buffer to its page */
863 		set_bh_page(bh, page, offset);
864 
865 		init_buffer(bh, NULL, NULL);
866 	}
867 	return head;
868 /*
869  * In case anything failed, we just free everything we got.
870  */
871 no_grow:
872 	if (head) {
873 		do {
874 			bh = head;
875 			head = head->b_this_page;
876 			free_buffer_head(bh);
877 		} while (head);
878 	}
879 
880 	/*
881 	 * Return failure for non-async IO requests.  Async IO requests
882 	 * are not allowed to fail, so we have to wait until buffer heads
883 	 * become available.  But we don't want tasks sleeping with
884 	 * partially complete buffers, so all were released above.
885 	 */
886 	if (!retry)
887 		return NULL;
888 
889 	/* We're _really_ low on memory. Now we just
890 	 * wait for old buffer heads to become free due to
891 	 * finishing IO.  Since this is an async request and
892 	 * the reserve list is empty, we're sure there are
893 	 * async buffer heads in use.
894 	 */
895 	free_more_memory();
896 	goto try_again;
897 }
898 EXPORT_SYMBOL_GPL(alloc_page_buffers);
899 
900 static inline void
901 link_dev_buffers(struct page *page, struct buffer_head *head)
902 {
903 	struct buffer_head *bh, *tail;
904 
905 	bh = head;
906 	do {
907 		tail = bh;
908 		bh = bh->b_this_page;
909 	} while (bh);
910 	tail->b_this_page = head;
911 	attach_page_buffers(page, head);
912 }
913 
914 /*
915  * Initialise the state of a blockdev page's buffers.
916  */
917 static sector_t
918 init_page_buffers(struct page *page, struct block_device *bdev,
919 			sector_t block, int size)
920 {
921 	struct buffer_head *head = page_buffers(page);
922 	struct buffer_head *bh = head;
923 	int uptodate = PageUptodate(page);
924 	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
925 
926 	do {
927 		if (!buffer_mapped(bh)) {
928 			init_buffer(bh, NULL, NULL);
929 			bh->b_bdev = bdev;
930 			bh->b_blocknr = block;
931 			if (uptodate)
932 				set_buffer_uptodate(bh);
933 			if (block < end_block)
934 				set_buffer_mapped(bh);
935 		}
936 		block++;
937 		bh = bh->b_this_page;
938 	} while (bh != head);
939 
940 	/*
941 	 * Caller needs to validate requested block against end of device.
942 	 */
943 	return end_block;
944 }
945 
946 /*
947  * Create the page-cache page that contains the requested block.
948  *
949  * This is used purely for blockdev mappings.
950  */
951 static int
952 grow_dev_page(struct block_device *bdev, sector_t block,
953 		pgoff_t index, int size, int sizebits)
954 {
955 	struct inode *inode = bdev->bd_inode;
956 	struct page *page;
957 	struct buffer_head *bh;
958 	sector_t end_block;
959 	int ret = 0;		/* Will call free_more_memory() */
960 
961 	page = find_or_create_page(inode->i_mapping, index,
962 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
963 	if (!page)
964 		return ret;
965 
966 	BUG_ON(!PageLocked(page));
967 
968 	if (page_has_buffers(page)) {
969 		bh = page_buffers(page);
970 		if (bh->b_size == size) {
971 			end_block = init_page_buffers(page, bdev,
972 						index << sizebits, size);
973 			goto done;
974 		}
975 		if (!try_to_free_buffers(page))
976 			goto failed;
977 	}
978 
979 	/*
980 	 * Allocate some buffers for this page
981 	 */
982 	bh = alloc_page_buffers(page, size, 0);
983 	if (!bh)
984 		goto failed;
985 
986 	/*
987 	 * Link the page to the buffers and initialise them.  Take the
988 	 * lock to be atomic wrt __find_get_block(), which does not
989 	 * run under the page lock.
990 	 */
991 	spin_lock(&inode->i_mapping->private_lock);
992 	link_dev_buffers(page, bh);
993 	end_block = init_page_buffers(page, bdev, index << sizebits, size);
994 	spin_unlock(&inode->i_mapping->private_lock);
995 done:
996 	ret = (block < end_block) ? 1 : -ENXIO;
997 failed:
998 	unlock_page(page);
999 	page_cache_release(page);
1000 	return ret;
1001 }
1002 
1003 /*
1004  * Create buffers for the specified block device block's page.  If
1005  * that page was dirty, the buffers are set dirty also.
1006  */
1007 static int
1008 grow_buffers(struct block_device *bdev, sector_t block, int size)
1009 {
1010 	pgoff_t index;
1011 	int sizebits;
1012 
1013 	sizebits = -1;
1014 	do {
1015 		sizebits++;
1016 	} while ((size << sizebits) < PAGE_SIZE);
1017 
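	/*
	 * Editor's note: sizebits is log2(PAGE_SIZE / size); e.g. with 4K
	 * pages and 512-byte blocks sizebits is 3, so blocks 0-7 all map to
	 * pagecache index 0.
	 */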
1018 	index = block >> sizebits;
1019 
1020 	/*
1021 	 * Check for a block which wants to lie outside our maximum possible
1022 	 * pagecache index.  (this comparison is done using sector_t types).
1023 	 */
1024 	if (unlikely(index != block >> sizebits)) {
1025 		char b[BDEVNAME_SIZE];
1026 
1027 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1028 			"device %s\n",
1029 			__func__, (unsigned long long)block,
1030 			bdevname(bdev, b));
1031 		return -EIO;
1032 	}
1033 
1034 	/* Create a page with the proper size buffers.. */
1035 	return grow_dev_page(bdev, block, index, size, sizebits);
1036 }
1037 
1038 static struct buffer_head *
1039 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1040 {
1041 	/* Size must be multiple of hard sectorsize */
1042 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1043 			(size < 512 || size > PAGE_SIZE))) {
1044 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1045 					size);
1046 		printk(KERN_ERR "logical block size: %d\n",
1047 					bdev_logical_block_size(bdev));
1048 
1049 		dump_stack();
1050 		return NULL;
1051 	}
1052 
1053 	for (;;) {
1054 		struct buffer_head *bh;
1055 		int ret;
1056 
1057 		bh = __find_get_block(bdev, block, size);
1058 		if (bh)
1059 			return bh;
1060 
1061 		ret = grow_buffers(bdev, block, size);
1062 		if (ret < 0)
1063 			return NULL;
1064 		if (ret == 0)
1065 			free_more_memory();
1066 	}
1067 }
1068 
1069 /*
1070  * The relationship between dirty buffers and dirty pages:
1071  *
1072  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1073  * the page is tagged dirty in its radix tree.
1074  *
1075  * At all times, the dirtiness of the buffers represents the dirtiness of
1076  * subsections of the page.  If the page has buffers, the page dirty bit is
1077  * merely a hint about the true dirty state.
1078  *
1079  * When a page is set dirty in its entirety, all its buffers are marked dirty
1080  * (if the page has buffers).
1081  *
1082  * When a buffer is marked dirty, its page is dirtied, but the page's other
1083  * buffers are not.
1084  *
1085  * Also.  When blockdev buffers are explicitly read with bread(), they
1086  * individually become uptodate.  But their backing page remains not
1087  * uptodate - even if all of its buffers are uptodate.  A subsequent
1088  * block_read_full_page() against that page will discover all the uptodate
1089  * buffers, will set the page uptodate and will perform no I/O.
1090  */
1091 
1092 /**
1093  * mark_buffer_dirty - mark a buffer_head as needing writeout
1094  * @bh: the buffer_head to mark dirty
1095  *
1096  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1097  * backing page dirty, then tag the page as dirty in its address_space's radix
1098  * tree and then attach the address_space's inode to its superblock's dirty
1099  * inode list.
1100  *
1101  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1102  * mapping->tree_lock and mapping->host->i_lock.
1103  */
1104 void mark_buffer_dirty(struct buffer_head *bh)
1105 {
1106 	WARN_ON_ONCE(!buffer_uptodate(bh));
1107 
1108 	/*
1109 	 * Very *carefully* optimize the it-is-already-dirty case.
1110 	 *
1111 	 * Don't let the final "is it dirty" escape to before we
1112 	 * perhaps modified the buffer.
1113 	 */
1114 	if (buffer_dirty(bh)) {
1115 		smp_mb();
1116 		if (buffer_dirty(bh))
1117 			return;
1118 	}
1119 
1120 	if (!test_set_buffer_dirty(bh)) {
1121 		struct page *page = bh->b_page;
1122 		if (!TestSetPageDirty(page)) {
1123 			struct address_space *mapping = page_mapping(page);
1124 			if (mapping)
1125 				__set_page_dirty(page, mapping, 0);
1126 		}
1127 	}
1128 }
1129 EXPORT_SYMBOL(mark_buffer_dirty);
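
/*
 * Illustrative sketch (editor's example): a typical metadata update in a
 * block-device backed filesystem, using sb_getblk() (the super_block wrapper
 * around __getblk()):
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, sb->s_blocksize);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);		(writeback picks it up later)
 *	brelse(bh);
 *
 * A caller that must have the block on disk before proceeding can call
 * sync_dirty_buffer(bh) before the brelse().
 */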
1130 
1131 /*
1132  * Decrement a buffer_head's reference count.  If all buffers against a page
1133  * have zero reference count, are clean and unlocked, and if the page is clean
1134  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1135  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1136  * a page but it ends up not being freed, and buffers may later be reattached).
1137  */
1138 void __brelse(struct buffer_head * buf)
1139 {
1140 	if (atomic_read(&buf->b_count)) {
1141 		put_bh(buf);
1142 		return;
1143 	}
1144 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1145 }
1146 EXPORT_SYMBOL(__brelse);
1147 
1148 /*
1149  * bforget() is like brelse(), except it discards any
1150  * potentially dirty data.
1151  */
1152 void __bforget(struct buffer_head *bh)
1153 {
1154 	clear_buffer_dirty(bh);
1155 	if (bh->b_assoc_map) {
1156 		struct address_space *buffer_mapping = bh->b_page->mapping;
1157 
1158 		spin_lock(&buffer_mapping->private_lock);
1159 		list_del_init(&bh->b_assoc_buffers);
1160 		bh->b_assoc_map = NULL;
1161 		spin_unlock(&buffer_mapping->private_lock);
1162 	}
1163 	__brelse(bh);
1164 }
1165 EXPORT_SYMBOL(__bforget);
1166 
1167 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1168 {
1169 	lock_buffer(bh);
1170 	if (buffer_uptodate(bh)) {
1171 		unlock_buffer(bh);
1172 		return bh;
1173 	} else {
1174 		get_bh(bh);
1175 		bh->b_end_io = end_buffer_read_sync;
1176 		submit_bh(READ, bh);
1177 		wait_on_buffer(bh);
1178 		if (buffer_uptodate(bh))
1179 			return bh;
1180 	}
1181 	brelse(bh);
1182 	return NULL;
1183 }
1184 
1185 /*
1186  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1187  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1188  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1189  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1190  * CPU's LRUs at the same time.
1191  *
1192  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1193  * sb_find_get_block().
1194  *
1195  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1196  * a local interrupt disable for that.
1197  */
1198 
1199 #define BH_LRU_SIZE	8
1200 
1201 struct bh_lru {
1202 	struct buffer_head *bhs[BH_LRU_SIZE];
1203 };
1204 
1205 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1206 
1207 #ifdef CONFIG_SMP
1208 #define bh_lru_lock()	local_irq_disable()
1209 #define bh_lru_unlock()	local_irq_enable()
1210 #else
1211 #define bh_lru_lock()	preempt_disable()
1212 #define bh_lru_unlock()	preempt_enable()
1213 #endif
1214 
1215 static inline void check_irqs_on(void)
1216 {
1217 #ifdef irqs_disabled
1218 	BUG_ON(irqs_disabled());
1219 #endif
1220 }
1221 
1222 /*
1223  * The LRU management algorithm is dopey-but-simple.  Sorry.
1224  */
1225 static void bh_lru_install(struct buffer_head *bh)
1226 {
1227 	struct buffer_head *evictee = NULL;
1228 
1229 	check_irqs_on();
1230 	bh_lru_lock();
1231 	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1232 		struct buffer_head *bhs[BH_LRU_SIZE];
1233 		int in;
1234 		int out = 0;
1235 
1236 		get_bh(bh);
1237 		bhs[out++] = bh;
1238 		for (in = 0; in < BH_LRU_SIZE; in++) {
1239 			struct buffer_head *bh2 =
1240 				__this_cpu_read(bh_lrus.bhs[in]);
1241 
1242 			if (bh2 == bh) {
1243 				__brelse(bh2);
1244 			} else {
1245 				if (out >= BH_LRU_SIZE) {
1246 					BUG_ON(evictee != NULL);
1247 					evictee = bh2;
1248 				} else {
1249 					bhs[out++] = bh2;
1250 				}
1251 			}
1252 		}
1253 		while (out < BH_LRU_SIZE)
1254 			bhs[out++] = NULL;
1255 		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1256 	}
1257 	bh_lru_unlock();
1258 
1259 	if (evictee)
1260 		__brelse(evictee);
1261 }
1262 
1263 /*
1264  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1265  */
1266 static struct buffer_head *
1267 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1268 {
1269 	struct buffer_head *ret = NULL;
1270 	unsigned int i;
1271 
1272 	check_irqs_on();
1273 	bh_lru_lock();
1274 	for (i = 0; i < BH_LRU_SIZE; i++) {
1275 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1276 
1277 		if (bh && bh->b_bdev == bdev &&
1278 				bh->b_blocknr == block && bh->b_size == size) {
1279 			if (i) {
1280 				while (i) {
1281 					__this_cpu_write(bh_lrus.bhs[i],
1282 						__this_cpu_read(bh_lrus.bhs[i - 1]));
1283 					i--;
1284 				}
1285 				__this_cpu_write(bh_lrus.bhs[0], bh);
1286 			}
1287 			get_bh(bh);
1288 			ret = bh;
1289 			break;
1290 		}
1291 	}
1292 	bh_lru_unlock();
1293 	return ret;
1294 }
1295 
1296 /*
1297  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1298  * it in the LRU and mark it as accessed.  If it is not present then return
1299  * NULL
1300  */
1301 struct buffer_head *
1302 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1303 {
1304 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1305 
1306 	if (bh == NULL) {
1307 		bh = __find_get_block_slow(bdev, block);
1308 		if (bh)
1309 			bh_lru_install(bh);
1310 	}
1311 	if (bh)
1312 		touch_buffer(bh);
1313 	return bh;
1314 }
1315 EXPORT_SYMBOL(__find_get_block);
1316 
1317 /*
1318  * __getblk will locate (and, if necessary, create) the buffer_head
1319  * which corresponds to the passed block_device, block and size. The
1320  * returned buffer has its reference count incremented.
1321  *
1322  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1323  * attempt is failing.  FIXME, perhaps?
1324  */
1325 struct buffer_head *
1326 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1327 {
1328 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1329 
1330 	might_sleep();
1331 	if (bh == NULL)
1332 		bh = __getblk_slow(bdev, block, size);
1333 	return bh;
1334 }
1335 EXPORT_SYMBOL(__getblk);
1336 
1337 /*
1338  * Do async read-ahead on a buffer..
1339  */
1340 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1341 {
1342 	struct buffer_head *bh = __getblk(bdev, block, size);
1343 	if (likely(bh)) {
1344 		ll_rw_block(READA, 1, &bh);
1345 		brelse(bh);
1346 	}
1347 }
1348 EXPORT_SYMBOL(__breadahead);
1349 
1350 /**
1351  *  __bread() - reads a specified block and returns the bh
1352  *  @bdev: the block_device to read from
1353  *  @block: number of block
1354  *  @size: size (in bytes) to read
1355  *
1356  *  Reads a specified block, and returns buffer head that contains it.
1357  *  It returns NULL if the block was unreadable.
1358  */
1359 struct buffer_head *
1360 __bread(struct block_device *bdev, sector_t block, unsigned size)
1361 {
1362 	struct buffer_head *bh = __getblk(bdev, block, size);
1363 
1364 	if (likely(bh) && !buffer_uptodate(bh))
1365 		bh = __bread_slow(bh);
1366 	return bh;
1367 }
1368 EXPORT_SYMBOL(__bread);
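
/*
 * Illustrative sketch (editor's example): reading a metadata block through
 * the buffer cache with sb_bread(), the super_block wrapper around __bread():
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	... examine bh->b_data ...
 *	brelse(bh);
 *
 * On success the buffer is uptodate and holds an extra reference which the
 * caller must drop with brelse().
 */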
1369 
1370 /*
1371  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1372  * This doesn't race because it runs in each cpu either in irq
1373  * or with preempt disabled.
1374  */
1375 static void invalidate_bh_lru(void *arg)
1376 {
1377 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1378 	int i;
1379 
1380 	for (i = 0; i < BH_LRU_SIZE; i++) {
1381 		brelse(b->bhs[i]);
1382 		b->bhs[i] = NULL;
1383 	}
1384 	put_cpu_var(bh_lrus);
1385 }
1386 
1387 static bool has_bh_in_lru(int cpu, void *dummy)
1388 {
1389 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1390 	int i;
1391 
1392 	for (i = 0; i < BH_LRU_SIZE; i++) {
1393 		if (b->bhs[i])
1394 			return 1;
1395 	}
1396 
1397 	return 0;
1398 }
1399 
1400 void invalidate_bh_lrus(void)
1401 {
1402 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
1403 }
1404 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1405 
1406 void set_bh_page(struct buffer_head *bh,
1407 		struct page *page, unsigned long offset)
1408 {
1409 	bh->b_page = page;
1410 	BUG_ON(offset >= PAGE_SIZE);
1411 	if (PageHighMem(page))
1412 		/*
1413 		 * This catches illegal uses and preserves the offset:
1414 		 */
1415 		bh->b_data = (char *)(0 + offset);
1416 	else
1417 		bh->b_data = page_address(page) + offset;
1418 }
1419 EXPORT_SYMBOL(set_bh_page);
1420 
1421 /*
1422  * Called when truncating a buffer on a page completely.
1423  */
1424 static void discard_buffer(struct buffer_head * bh)
1425 {
1426 	lock_buffer(bh);
1427 	clear_buffer_dirty(bh);
1428 	bh->b_bdev = NULL;
1429 	clear_buffer_mapped(bh);
1430 	clear_buffer_req(bh);
1431 	clear_buffer_new(bh);
1432 	clear_buffer_delay(bh);
1433 	clear_buffer_unwritten(bh);
1434 	unlock_buffer(bh);
1435 }
1436 
1437 /**
1438  * block_invalidatepage - invalidate part or all of a buffer-backed page
1439  *
1440  * @page: the page which is affected
1441  * @offset: the index of the truncation point
1442  *
1443  * block_invalidatepage() is called when all or part of the page has become
1444  * invalidated by a truncate operation.
1445  *
1446  * block_invalidatepage() does not have to release all buffers, but it must
1447  * ensure that no dirty buffer is left outside @offset and that no I/O
1448  * is underway against any of the blocks which are outside the truncation
1449  * point.  Because the caller is about to free (and possibly reuse) those
1450  * blocks on-disk.
1451  */
1452 void block_invalidatepage(struct page *page, unsigned long offset)
1453 {
1454 	struct buffer_head *head, *bh, *next;
1455 	unsigned int curr_off = 0;
1456 
1457 	BUG_ON(!PageLocked(page));
1458 	if (!page_has_buffers(page))
1459 		goto out;
1460 
1461 	head = page_buffers(page);
1462 	bh = head;
1463 	do {
1464 		unsigned int next_off = curr_off + bh->b_size;
1465 		next = bh->b_this_page;
1466 
1467 		/*
1468 		 * is this block fully invalidated?
1469 		 */
1470 		if (offset <= curr_off)
1471 			discard_buffer(bh);
1472 		curr_off = next_off;
1473 		bh = next;
1474 	} while (bh != head);
1475 
1476 	/*
1477 	 * We release buffers only if the entire page is being invalidated.
1478 	 * The get_block cached value has been unconditionally invalidated,
1479 	 * so real IO is not possible anymore.
1480 	 */
1481 	if (offset == 0)
1482 		try_to_release_page(page, 0);
1483 out:
1484 	return;
1485 }
1486 EXPORT_SYMBOL(block_invalidatepage);
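
/*
 * Illustrative sketch (editor's example): a filesystem with no private
 * per-page state can use block_invalidatepage() as (or from) its
 * ->invalidatepage:
 *
 *	static void foo_invalidatepage(struct page *page, unsigned long offset)
 *	{
 *		block_invalidatepage(page, offset);
 *	}
 *
 * with .invalidatepage = foo_invalidatepage in its address_space_operations.
 */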
1487 
1488 /*
1489  * We attach and possibly dirty the buffers atomically wrt
1490  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1491  * is already excluded via the page lock.
1492  */
1493 void create_empty_buffers(struct page *page,
1494 			unsigned long blocksize, unsigned long b_state)
1495 {
1496 	struct buffer_head *bh, *head, *tail;
1497 
1498 	head = alloc_page_buffers(page, blocksize, 1);
1499 	bh = head;
1500 	do {
1501 		bh->b_state |= b_state;
1502 		tail = bh;
1503 		bh = bh->b_this_page;
1504 	} while (bh);
1505 	tail->b_this_page = head;
1506 
1507 	spin_lock(&page->mapping->private_lock);
1508 	if (PageUptodate(page) || PageDirty(page)) {
1509 		bh = head;
1510 		do {
1511 			if (PageDirty(page))
1512 				set_buffer_dirty(bh);
1513 			if (PageUptodate(page))
1514 				set_buffer_uptodate(bh);
1515 			bh = bh->b_this_page;
1516 		} while (bh != head);
1517 	}
1518 	attach_page_buffers(page, head);
1519 	spin_unlock(&page->mapping->private_lock);
1520 }
1521 EXPORT_SYMBOL(create_empty_buffers);
1522 
1523 /*
1524  * We are taking a block for data and we don't want any output from any
1525  * buffer-cache aliases from the moment this function returns until the
1526  * moment something explicitly marks the buffer dirty (hopefully that will
1527  * not happen until we free that block ;-)
1528  * We don't even need to mark it not-uptodate - nobody can expect
1529  * anything from a newly allocated buffer anyway. We used to use
1530  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1531  * don't want to mark the alias unmapped, for example - it would confuse
1532  * anyone who might pick it with bread() afterwards...
1533  *
1534  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1535  * be writeout I/O going on against recently-freed buffers.  We don't
1536  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1537  * only if we really need to.  That happens here.
1538  */
1539 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1540 {
1541 	struct buffer_head *old_bh;
1542 
1543 	might_sleep();
1544 
1545 	old_bh = __find_get_block_slow(bdev, block);
1546 	if (old_bh) {
1547 		clear_buffer_dirty(old_bh);
1548 		wait_on_buffer(old_bh);
1549 		clear_buffer_req(old_bh);
1550 		__brelse(old_bh);
1551 	}
1552 }
1553 EXPORT_SYMBOL(unmap_underlying_metadata);
1554 
1555 /*
1556  * NOTE! All mapped/uptodate combinations are valid:
1557  *
1558  *	Mapped	Uptodate	Meaning
1559  *
1560  *	No	No		"unknown" - must do get_block()
1561  *	No	Yes		"hole" - zero-filled
1562  *	Yes	No		"allocated" - allocated on disk, not read in
1563  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1564  *
1565  * "Dirty" is valid only with the last case (mapped+uptodate).
1566  */
1567 
1568 /*
1569  * While block_write_full_page is writing back the dirty buffers under
1570  * the page lock, whoever dirtied the buffers may decide to clean them
1571  * again at any time.  We handle that by only looking at the buffer
1572  * state inside lock_buffer().
1573  *
1574  * If block_write_full_page() is called for regular writeback
1575  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1576  * locked buffer.   This only can happen if someone has written the buffer
1577  * directly, with submit_bh().  At the address_space level PageWriteback
1578  * prevents this contention from occurring.
1579  *
1580  * If block_write_full_page() is called with wbc->sync_mode ==
1581  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1582  * causes the writes to be flagged as synchronous writes.
1583  */
1584 static int __block_write_full_page(struct inode *inode, struct page *page,
1585 			get_block_t *get_block, struct writeback_control *wbc,
1586 			bh_end_io_t *handler)
1587 {
1588 	int err;
1589 	sector_t block;
1590 	sector_t last_block;
1591 	struct buffer_head *bh, *head;
1592 	const unsigned blocksize = 1 << inode->i_blkbits;
1593 	int nr_underway = 0;
1594 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1595 			WRITE_SYNC : WRITE);
1596 
1597 	BUG_ON(!PageLocked(page));
1598 
1599 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1600 
1601 	if (!page_has_buffers(page)) {
1602 		create_empty_buffers(page, blocksize,
1603 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1604 	}
1605 
1606 	/*
1607 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1608 	 * here, and the (potentially unmapped) buffers may become dirty at
1609 	 * any time.  If a buffer becomes dirty here after we've inspected it
1610 	 * then we just miss that fact, and the page stays dirty.
1611 	 *
1612 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1613 	 * handle that here by just cleaning them.
1614 	 */
1615 
1616 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1617 	head = page_buffers(page);
1618 	bh = head;
1619 
1620 	/*
1621 	 * Get all the dirty buffers mapped to disk addresses and
1622 	 * handle any aliases from the underlying blockdev's mapping.
1623 	 */
1624 	do {
1625 		if (block > last_block) {
1626 			/*
1627 			 * mapped buffers outside i_size will occur, because
1628 			 * this page can be outside i_size when there is a
1629 			 * truncate in progress.
1630 			 */
1631 			/*
1632 			 * The buffer was zeroed by block_write_full_page()
1633 			 */
1634 			clear_buffer_dirty(bh);
1635 			set_buffer_uptodate(bh);
1636 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1637 			   buffer_dirty(bh)) {
1638 			WARN_ON(bh->b_size != blocksize);
1639 			err = get_block(inode, block, bh, 1);
1640 			if (err)
1641 				goto recover;
1642 			clear_buffer_delay(bh);
1643 			if (buffer_new(bh)) {
1644 				/* blockdev mappings never come here */
1645 				clear_buffer_new(bh);
1646 				unmap_underlying_metadata(bh->b_bdev,
1647 							bh->b_blocknr);
1648 			}
1649 		}
1650 		bh = bh->b_this_page;
1651 		block++;
1652 	} while (bh != head);
1653 
1654 	do {
1655 		if (!buffer_mapped(bh))
1656 			continue;
1657 		/*
1658 		 * If it's a fully non-blocking write attempt and we cannot
1659 		 * lock the buffer then redirty the page.  Note that this can
1660 		 * potentially cause a busy-wait loop from writeback threads
1661 		 * and kswapd activity, but those code paths have their own
1662 		 * higher-level throttling.
1663 		 */
1664 		if (wbc->sync_mode != WB_SYNC_NONE) {
1665 			lock_buffer(bh);
1666 		} else if (!trylock_buffer(bh)) {
1667 			redirty_page_for_writepage(wbc, page);
1668 			continue;
1669 		}
1670 		if (test_clear_buffer_dirty(bh)) {
1671 			mark_buffer_async_write_endio(bh, handler);
1672 		} else {
1673 			unlock_buffer(bh);
1674 		}
1675 	} while ((bh = bh->b_this_page) != head);
1676 
1677 	/*
1678 	 * The page and its buffers are protected by PageWriteback(), so we can
1679 	 * drop the bh refcounts early.
1680 	 */
1681 	BUG_ON(PageWriteback(page));
1682 	set_page_writeback(page);
1683 
1684 	do {
1685 		struct buffer_head *next = bh->b_this_page;
1686 		if (buffer_async_write(bh)) {
1687 			submit_bh(write_op, bh);
1688 			nr_underway++;
1689 		}
1690 		bh = next;
1691 	} while (bh != head);
1692 	unlock_page(page);
1693 
1694 	err = 0;
1695 done:
1696 	if (nr_underway == 0) {
1697 		/*
1698 		 * The page was marked dirty, but the buffers were
1699 		 * clean.  Someone wrote them back by hand with
1700 		 * ll_rw_block/submit_bh.  A rare case.
1701 		 */
1702 		end_page_writeback(page);
1703 
1704 		/*
1705 		 * The page and buffer_heads can be released at any time from
1706 		 * here on.
1707 		 */
1708 	}
1709 	return err;
1710 
1711 recover:
1712 	/*
1713 	 * ENOSPC, or some other error.  We may already have added some
1714 	 * blocks to the file, so we need to write these out to avoid
1715 	 * exposing stale data.
1716 	 * The page is currently locked and not marked for writeback
1717 	 */
1718 	bh = head;
1719 	/* Recovery: lock and submit the mapped buffers */
1720 	do {
1721 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1722 		    !buffer_delay(bh)) {
1723 			lock_buffer(bh);
1724 			mark_buffer_async_write_endio(bh, handler);
1725 		} else {
1726 			/*
1727 			 * The buffer may have been set dirty during
1728 			 * attachment to a dirty page.
1729 			 */
1730 			clear_buffer_dirty(bh);
1731 		}
1732 	} while ((bh = bh->b_this_page) != head);
1733 	SetPageError(page);
1734 	BUG_ON(PageWriteback(page));
1735 	mapping_set_error(page->mapping, err);
1736 	set_page_writeback(page);
1737 	do {
1738 		struct buffer_head *next = bh->b_this_page;
1739 		if (buffer_async_write(bh)) {
1740 			clear_buffer_dirty(bh);
1741 			submit_bh(write_op, bh);
1742 			nr_underway++;
1743 		}
1744 		bh = next;
1745 	} while (bh != head);
1746 	unlock_page(page);
1747 	goto done;
1748 }
1749 
1750 /*
1751  * If a page has any new buffers, zero them out here, and mark them uptodate
1752  * and dirty so they'll be written out (in order to prevent uninitialised
1753  * block data from leaking). And clear the new bit.
1754  */
1755 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1756 {
1757 	unsigned int block_start, block_end;
1758 	struct buffer_head *head, *bh;
1759 
1760 	BUG_ON(!PageLocked(page));
1761 	if (!page_has_buffers(page))
1762 		return;
1763 
1764 	bh = head = page_buffers(page);
1765 	block_start = 0;
1766 	do {
1767 		block_end = block_start + bh->b_size;
1768 
1769 		if (buffer_new(bh)) {
1770 			if (block_end > from && block_start < to) {
1771 				if (!PageUptodate(page)) {
1772 					unsigned start, size;
1773 
1774 					start = max(from, block_start);
1775 					size = min(to, block_end) - start;
1776 
1777 					zero_user(page, start, size);
1778 					set_buffer_uptodate(bh);
1779 				}
1780 
1781 				clear_buffer_new(bh);
1782 				mark_buffer_dirty(bh);
1783 			}
1784 		}
1785 
1786 		block_start = block_end;
1787 		bh = bh->b_this_page;
1788 	} while (bh != head);
1789 }
1790 EXPORT_SYMBOL(page_zero_new_buffers);
1791 
1792 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1793 		get_block_t *get_block)
1794 {
1795 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1796 	unsigned to = from + len;
1797 	struct inode *inode = page->mapping->host;
1798 	unsigned block_start, block_end;
1799 	sector_t block;
1800 	int err = 0;
1801 	unsigned blocksize, bbits;
1802 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1803 
1804 	BUG_ON(!PageLocked(page));
1805 	BUG_ON(from > PAGE_CACHE_SIZE);
1806 	BUG_ON(to > PAGE_CACHE_SIZE);
1807 	BUG_ON(from > to);
1808 
1809 	blocksize = 1 << inode->i_blkbits;
1810 	if (!page_has_buffers(page))
1811 		create_empty_buffers(page, blocksize, 0);
1812 	head = page_buffers(page);
1813 
1814 	bbits = inode->i_blkbits;
1815 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1816 
1817 	for(bh = head, block_start = 0; bh != head || !block_start;
1818 	    block++, block_start=block_end, bh = bh->b_this_page) {
1819 		block_end = block_start + blocksize;
1820 		if (block_end <= from || block_start >= to) {
1821 			if (PageUptodate(page)) {
1822 				if (!buffer_uptodate(bh))
1823 					set_buffer_uptodate(bh);
1824 			}
1825 			continue;
1826 		}
1827 		if (buffer_new(bh))
1828 			clear_buffer_new(bh);
1829 		if (!buffer_mapped(bh)) {
1830 			WARN_ON(bh->b_size != blocksize);
1831 			err = get_block(inode, block, bh, 1);
1832 			if (err)
1833 				break;
1834 			if (buffer_new(bh)) {
1835 				unmap_underlying_metadata(bh->b_bdev,
1836 							bh->b_blocknr);
1837 				if (PageUptodate(page)) {
1838 					clear_buffer_new(bh);
1839 					set_buffer_uptodate(bh);
1840 					mark_buffer_dirty(bh);
1841 					continue;
1842 				}
1843 				if (block_end > to || block_start < from)
1844 					zero_user_segments(page,
1845 						to, block_end,
1846 						block_start, from);
1847 				continue;
1848 			}
1849 		}
1850 		if (PageUptodate(page)) {
1851 			if (!buffer_uptodate(bh))
1852 				set_buffer_uptodate(bh);
1853 			continue;
1854 		}
1855 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1856 		    !buffer_unwritten(bh) &&
1857 		     (block_start < from || block_end > to)) {
1858 			ll_rw_block(READ, 1, &bh);
1859 			*wait_bh++=bh;
1860 		}
1861 	}
1862 	/*
1863 	 * If we issued read requests - let them complete.
1864 	 */
1865 	while(wait_bh > wait) {
1866 		wait_on_buffer(*--wait_bh);
1867 		if (!buffer_uptodate(*wait_bh))
1868 			err = -EIO;
1869 	}
1870 	if (unlikely(err))
1871 		page_zero_new_buffers(page, from, to);
1872 	return err;
1873 }
1874 EXPORT_SYMBOL(__block_write_begin);
1875 
1876 static int __block_commit_write(struct inode *inode, struct page *page,
1877 		unsigned from, unsigned to)
1878 {
1879 	unsigned block_start, block_end;
1880 	int partial = 0;
1881 	unsigned blocksize;
1882 	struct buffer_head *bh, *head;
1883 
1884 	blocksize = 1 << inode->i_blkbits;
1885 
1886 	for(bh = head = page_buffers(page), block_start = 0;
1887 	    bh != head || !block_start;
1888 	    block_start=block_end, bh = bh->b_this_page) {
1889 		block_end = block_start + blocksize;
1890 		if (block_end <= from || block_start >= to) {
1891 			if (!buffer_uptodate(bh))
1892 				partial = 1;
1893 		} else {
1894 			set_buffer_uptodate(bh);
1895 			mark_buffer_dirty(bh);
1896 		}
1897 		clear_buffer_new(bh);
1898 	}
1899 
1900 	/*
1901 	 * If this is a partial write which happened to make all buffers
1902 	 * uptodate then we can optimize away a bogus readpage() for
1903 	 * the next read(). Here we 'discover' whether the page went
1904 	 * uptodate as a result of this (potentially partial) write.
1905 	 */
1906 	if (!partial)
1907 		SetPageUptodate(page);
1908 	return 0;
1909 }
1910 
1911 /*
1912  * block_write_begin takes care of the basic task of block allocation and
1913  * bringing partial write blocks uptodate first.
1914  *
1915  * The filesystem needs to handle block truncation upon failure.
1916  */
1917 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1918 		unsigned flags, struct page **pagep, get_block_t *get_block)
1919 {
1920 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1921 	struct page *page;
1922 	int status;
1923 
1924 	page = grab_cache_page_write_begin(mapping, index, flags);
1925 	if (!page)
1926 		return -ENOMEM;
1927 
1928 	status = __block_write_begin(page, pos, len, get_block);
1929 	if (unlikely(status)) {
1930 		unlock_page(page);
1931 		page_cache_release(page);
1932 		page = NULL;
1933 	}
1934 
1935 	*pagep = page;
1936 	return status;
1937 }
1938 EXPORT_SYMBOL(block_write_begin);
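
/*
 * Illustrative sketch, not part of the original source: a filesystem whose
 * block-mapping callback is a (hypothetical) get_block_t named
 * myfs_get_block() can implement its ->write_begin entirely in terms of the
 * helper above:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, flags,
 *					 pagep, myfs_get_block);
 *	}
 *
 * As noted above, on failure the filesystem must still truncate away any
 * blocks that were allocated beyond the unchanged i_size.
 */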
1939 
1940 int block_write_end(struct file *file, struct address_space *mapping,
1941 			loff_t pos, unsigned len, unsigned copied,
1942 			struct page *page, void *fsdata)
1943 {
1944 	struct inode *inode = mapping->host;
1945 	unsigned start;
1946 
1947 	start = pos & (PAGE_CACHE_SIZE - 1);
1948 
1949 	if (unlikely(copied < len)) {
1950 		/*
1951 		 * The buffers that were written will now be uptodate, so we
1952 		 * don't have to worry about a readpage reading them and
1953 		 * overwriting a partial write. However if we have encountered
1954 		 * a short write and only partially written into a buffer, it
1955 		 * will not be marked uptodate, so a readpage might come in and
1956 		 * destroy our partial write.
1957 		 *
1958 		 * Do the simplest thing, and just treat any short write to a
1959 		 * non uptodate page as a zero-length write, and force the
1960 		 * caller to redo the whole thing.
1961 		 */
1962 		if (!PageUptodate(page))
1963 			copied = 0;
1964 
1965 		page_zero_new_buffers(page, start+copied, start+len);
1966 	}
1967 	flush_dcache_page(page);
1968 
1969 	/* This could be a short (even 0-length) commit */
1970 	__block_commit_write(inode, page, start, start+copied);
1971 
1972 	return copied;
1973 }
1974 EXPORT_SYMBOL(block_write_end);
1975 
1976 int generic_write_end(struct file *file, struct address_space *mapping,
1977 			loff_t pos, unsigned len, unsigned copied,
1978 			struct page *page, void *fsdata)
1979 {
1980 	struct inode *inode = mapping->host;
1981 	int i_size_changed = 0;
1982 
1983 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1984 
1985 	/*
1986 	 * No need to use i_size_read() here, the i_size
1987 	 * cannot change under us because we hold i_mutex.
1988 	 *
1989 	 * But it's important to update i_size while still holding page lock:
1990 	 * page writeout could otherwise come in and zero beyond i_size.
1991 	 */
1992 	if (pos+copied > inode->i_size) {
1993 		i_size_write(inode, pos+copied);
1994 		i_size_changed = 1;
1995 	}
1996 
1997 	unlock_page(page);
1998 	page_cache_release(page);
1999 
2000 	/*
2001 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2002 	 * makes the holding time of page lock longer. Second, it forces lock
2003 	 * ordering of page lock and transaction start for journaling
2004 	 * filesystems.
2005 	 */
2006 	if (i_size_changed)
2007 		mark_inode_dirty(inode);
2008 
2009 	return copied;
2010 }
2011 EXPORT_SYMBOL(generic_write_end);
2012 
2013 /*
2014  * block_is_partially_uptodate checks whether buffers within a page are
2015  * uptodate or not.
2016  *
2017  * Returns true if all buffers which correspond to a file portion
2018  * we want to read are uptodate.
2019  */
2020 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2021 					unsigned long from)
2022 {
2023 	struct inode *inode = page->mapping->host;
2024 	unsigned block_start, block_end, blocksize;
2025 	unsigned to;
2026 	struct buffer_head *bh, *head;
2027 	int ret = 1;
2028 
2029 	if (!page_has_buffers(page))
2030 		return 0;
2031 
2032 	blocksize = 1 << inode->i_blkbits;
2033 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2034 	to = from + to;
2035 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2036 		return 0;
2037 
2038 	head = page_buffers(page);
2039 	bh = head;
2040 	block_start = 0;
2041 	do {
2042 		block_end = block_start + blocksize;
2043 		if (block_end > from && block_start < to) {
2044 			if (!buffer_uptodate(bh)) {
2045 				ret = 0;
2046 				break;
2047 			}
2048 			if (block_end >= to)
2049 				break;
2050 		}
2051 		block_start = block_end;
2052 		bh = bh->b_this_page;
2053 	} while (bh != head);
2054 
2055 	return ret;
2056 }
2057 EXPORT_SYMBOL(block_is_partially_uptodate);
2058 
2059 /*
2060  * Generic "read page" function for block devices that have the normal
2061  * get_block functionality. This is most of the block device filesystems.
2062  * get_block functionality. This covers most of the block device filesystems.
2063  * set/clear_buffer_uptodate() functions propagate buffer state into the
2064  * page struct once IO has completed.
2065  */
2066 int block_read_full_page(struct page *page, get_block_t *get_block)
2067 {
2068 	struct inode *inode = page->mapping->host;
2069 	sector_t iblock, lblock;
2070 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2071 	unsigned int blocksize;
2072 	int nr, i;
2073 	int fully_mapped = 1;
2074 
2075 	BUG_ON(!PageLocked(page));
2076 	blocksize = 1 << inode->i_blkbits;
2077 	if (!page_has_buffers(page))
2078 		create_empty_buffers(page, blocksize, 0);
2079 	head = page_buffers(page);
2080 
2081 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2082 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2083 	bh = head;
2084 	nr = 0;
2085 	i = 0;
2086 
2087 	do {
2088 		if (buffer_uptodate(bh))
2089 			continue;
2090 
2091 		if (!buffer_mapped(bh)) {
2092 			int err = 0;
2093 
2094 			fully_mapped = 0;
2095 			if (iblock < lblock) {
2096 				WARN_ON(bh->b_size != blocksize);
2097 				err = get_block(inode, iblock, bh, 0);
2098 				if (err)
2099 					SetPageError(page);
2100 			}
2101 			if (!buffer_mapped(bh)) {
2102 				zero_user(page, i * blocksize, blocksize);
2103 				if (!err)
2104 					set_buffer_uptodate(bh);
2105 				continue;
2106 			}
2107 			/*
2108 			 * get_block() might have updated the buffer
2109 			 * synchronously
2110 			 */
2111 			if (buffer_uptodate(bh))
2112 				continue;
2113 		}
2114 		arr[nr++] = bh;
2115 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2116 
2117 	if (fully_mapped)
2118 		SetPageMappedToDisk(page);
2119 
2120 	if (!nr) {
2121 		/*
2122 		 * All buffers are uptodate - we can set the page uptodate
2123 		 * as well. But not if get_block() returned an error.
2124 		 */
2125 		if (!PageError(page))
2126 			SetPageUptodate(page);
2127 		unlock_page(page);
2128 		return 0;
2129 	}
2130 
2131 	/* Stage two: lock the buffers */
2132 	for (i = 0; i < nr; i++) {
2133 		bh = arr[i];
2134 		lock_buffer(bh);
2135 		mark_buffer_async_read(bh);
2136 	}
2137 
2138 	/*
2139 	 * Stage 3: start the IO.  Check for uptodateness
2140 	 * inside the buffer lock in case another process reading
2141 	 * the underlying blockdev brought it uptodate (the sct fix).
2142 	 */
2143 	for (i = 0; i < nr; i++) {
2144 		bh = arr[i];
2145 		if (buffer_uptodate(bh))
2146 			end_buffer_async_read(bh, 1);
2147 		else
2148 			submit_bh(READ, bh);
2149 	}
2150 	return 0;
2151 }
2152 EXPORT_SYMBOL(block_read_full_page);
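
/*
 * Illustrative sketch, not part of the original source: combined with the
 * write_begin/write_end helpers above, block_read_full_page() lets a simple
 * filesystem (hypothetical names throughout) describe its buffered I/O as:
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage		= myfs_readpage,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *	};
 */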
2153 
2154 /* utility function for filesystems that need to do work on expanding
2155  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2156  * deal with the hole.
2157  */
2158 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2159 {
2160 	struct address_space *mapping = inode->i_mapping;
2161 	struct page *page;
2162 	void *fsdata;
2163 	int err;
2164 
2165 	err = inode_newsize_ok(inode, size);
2166 	if (err)
2167 		goto out;
2168 
2169 	err = pagecache_write_begin(NULL, mapping, size, 0,
2170 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2171 				&page, &fsdata);
2172 	if (err)
2173 		goto out;
2174 
2175 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2176 	BUG_ON(err > 0);
2177 
2178 out:
2179 	return err;
2180 }
2181 EXPORT_SYMBOL(generic_cont_expand_simple);
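
/*
 * Illustrative sketch, not part of the original source: a filesystem that
 * cannot leave holes might call this from its (hypothetical) ->setattr when
 * the file is being grown, before committing the new size:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */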
2182 
2183 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2184 			    loff_t pos, loff_t *bytes)
2185 {
2186 	struct inode *inode = mapping->host;
2187 	unsigned blocksize = 1 << inode->i_blkbits;
2188 	struct page *page;
2189 	void *fsdata;
2190 	pgoff_t index, curidx;
2191 	loff_t curpos;
2192 	unsigned zerofrom, offset, len;
2193 	int err = 0;
2194 
2195 	index = pos >> PAGE_CACHE_SHIFT;
2196 	offset = pos & ~PAGE_CACHE_MASK;
2197 
2198 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2199 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2200 		if (zerofrom & (blocksize-1)) {
2201 			*bytes |= (blocksize-1);
2202 			(*bytes)++;
2203 		}
2204 		len = PAGE_CACHE_SIZE - zerofrom;
2205 
2206 		err = pagecache_write_begin(file, mapping, curpos, len,
2207 						AOP_FLAG_UNINTERRUPTIBLE,
2208 						&page, &fsdata);
2209 		if (err)
2210 			goto out;
2211 		zero_user(page, zerofrom, len);
2212 		err = pagecache_write_end(file, mapping, curpos, len, len,
2213 						page, fsdata);
2214 		if (err < 0)
2215 			goto out;
2216 		BUG_ON(err != len);
2217 		err = 0;
2218 
2219 		balance_dirty_pages_ratelimited(mapping);
2220 	}
2221 
2222 	/* page covers the boundary, find the boundary offset */
2223 	if (index == curidx) {
2224 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2225 		/* if we will expand the thing last block will be filled */
2226 		/* if we are expanding the file, the last block will be filled */
2227 			goto out;
2228 		}
2229 		if (zerofrom & (blocksize-1)) {
2230 			*bytes |= (blocksize-1);
2231 			(*bytes)++;
2232 		}
2233 		len = offset - zerofrom;
2234 
2235 		err = pagecache_write_begin(file, mapping, curpos, len,
2236 						AOP_FLAG_UNINTERRUPTIBLE,
2237 						&page, &fsdata);
2238 		if (err)
2239 			goto out;
2240 		zero_user(page, zerofrom, len);
2241 		err = pagecache_write_end(file, mapping, curpos, len, len,
2242 						page, fsdata);
2243 		if (err < 0)
2244 			goto out;
2245 		BUG_ON(err != len);
2246 		err = 0;
2247 	}
2248 out:
2249 	return err;
2250 }
2251 
2252 /*
2253  * For moronic filesystems that do not allow holes in files.
2254  * We may have to extend the file.
2255  */
2256 int cont_write_begin(struct file *file, struct address_space *mapping,
2257 			loff_t pos, unsigned len, unsigned flags,
2258 			struct page **pagep, void **fsdata,
2259 			get_block_t *get_block, loff_t *bytes)
2260 {
2261 	struct inode *inode = mapping->host;
2262 	unsigned blocksize = 1 << inode->i_blkbits;
2263 	unsigned zerofrom;
2264 	int err;
2265 
2266 	err = cont_expand_zero(file, mapping, pos, bytes);
2267 	if (err)
2268 		return err;
2269 
2270 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2271 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2272 		*bytes |= (blocksize-1);
2273 		(*bytes)++;
2274 	}
2275 
2276 	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2277 }
2278 EXPORT_SYMBOL(cont_write_begin);
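
/*
 * Illustrative sketch, not part of the original source: such a filesystem
 * typically keeps a per-inode count of how far the file is backed by
 * allocated blocks (called "alloc_bytes" here, a purely hypothetical field)
 * and passes its address as @bytes:
 *
 *	static int contfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, contfs_get_block,
 *					&CONTFS_I(mapping->host)->alloc_bytes);
 *	}
 */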
2279 
2280 int block_commit_write(struct page *page, unsigned from, unsigned to)
2281 {
2282 	struct inode *inode = page->mapping->host;
2283 	__block_commit_write(inode,page,from,to);
2284 	return 0;
2285 }
2286 EXPORT_SYMBOL(block_commit_write);
2287 
2288 /*
2289  * block_page_mkwrite() is not allowed to change the file size as it gets
2290  * called from a page fault handler when a page is first dirtied. Hence we must
2291  * be careful to check for EOF conditions here. We set the page up correctly
2292  * for a written page which means we get ENOSPC checking when writing into
2293  * holes and correct delalloc and unwritten extent mapping on filesystems that
2294  * support these features.
2295  *
2296  * We are not allowed to take the i_mutex here so we have to play games to
2297  * protect against truncate races as the page could now be beyond EOF.  Because
2298  * truncate writes the inode size before removing pages, once we have the
2299  * page lock we can determine safely if the page is beyond EOF. If it is not
2300  * beyond EOF, then the page is guaranteed safe against truncation until we
2301  * unlock the page.
2302  *
2303  * Direct callers of this function should protect against filesystem freezing
2304  * using sb_start_write() - sb_end_write() functions.
2305  */
2306 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2307 			 get_block_t get_block)
2308 {
2309 	struct page *page = vmf->page;
2310 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2311 	unsigned long end;
2312 	loff_t size;
2313 	int ret;
2314 
2315 	/*
2316 	 * Update file times before taking page lock. We may end up failing the
2317 	 * fault so this update may be superfluous but who really cares...
2318 	 */
2319 	file_update_time(vma->vm_file);
2320 
2321 	lock_page(page);
2322 	size = i_size_read(inode);
2323 	if ((page->mapping != inode->i_mapping) ||
2324 	    (page_offset(page) > size)) {
2325 		/* We overload EFAULT to mean page got truncated */
2326 		ret = -EFAULT;
2327 		goto out_unlock;
2328 	}
2329 
2330 	/* page is wholly or partially inside EOF */
2331 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2332 		end = size & ~PAGE_CACHE_MASK;
2333 	else
2334 		end = PAGE_CACHE_SIZE;
2335 
2336 	ret = __block_write_begin(page, 0, end, get_block);
2337 	if (!ret)
2338 		ret = block_commit_write(page, 0, end);
2339 
2340 	if (unlikely(ret < 0))
2341 		goto out_unlock;
2342 	set_page_dirty(page);
2343 	wait_on_page_writeback(page);
2344 	return 0;
2345 out_unlock:
2346 	unlock_page(page);
2347 	return ret;
2348 }
2349 EXPORT_SYMBOL(__block_page_mkwrite);
2350 
2351 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2352 		   get_block_t get_block)
2353 {
2354 	int ret;
2355 	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2356 
2357 	sb_start_pagefault(sb);
2358 	ret = __block_page_mkwrite(vma, vmf, get_block);
2359 	sb_end_pagefault(sb);
2360 	return block_page_mkwrite_return(ret);
2361 }
2362 EXPORT_SYMBOL(block_page_mkwrite);
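
/*
 * Illustrative sketch, not part of the original source: wiring the helper
 * above into a (hypothetical) filesystem's mmap path could look like:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, myfs_get_block);
 *	}
 *
 *	static const struct vm_operations_struct myfs_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 *
 * Freeze protection is then handled by block_page_mkwrite() itself via
 * sb_start_pagefault()/sb_end_pagefault(); only direct callers of
 * __block_page_mkwrite() need the sb_start_write() protection noted above.
 */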
2363 
2364 /*
2365  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2366  * immediately, while under the page lock.  So it needs a special end_io
2367  * handler which does not touch the bh after unlocking it.
2368  */
2369 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2370 {
2371 	__end_buffer_read_notouch(bh, uptodate);
2372 }
2373 
2374 /*
2375  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2376  * the page (converting it to circular linked list and taking care of page
2377  * dirty races).
2378  */
2379 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2380 {
2381 	struct buffer_head *bh;
2382 
2383 	BUG_ON(!PageLocked(page));
2384 
2385 	spin_lock(&page->mapping->private_lock);
2386 	bh = head;
2387 	do {
2388 		if (PageDirty(page))
2389 			set_buffer_dirty(bh);
2390 		if (!bh->b_this_page)
2391 			bh->b_this_page = head;
2392 		bh = bh->b_this_page;
2393 	} while (bh != head);
2394 	attach_page_buffers(page, head);
2395 	spin_unlock(&page->mapping->private_lock);
2396 }
2397 
2398 /*
2399  * On entry, the page is fully not uptodate.
2400  * On exit the page is fully uptodate in the areas outside (from,to)
2401  * The filesystem needs to handle block truncation upon failure.
2402  */
2403 int nobh_write_begin(struct address_space *mapping,
2404 			loff_t pos, unsigned len, unsigned flags,
2405 			struct page **pagep, void **fsdata,
2406 			get_block_t *get_block)
2407 {
2408 	struct inode *inode = mapping->host;
2409 	const unsigned blkbits = inode->i_blkbits;
2410 	const unsigned blocksize = 1 << blkbits;
2411 	struct buffer_head *head, *bh;
2412 	struct page *page;
2413 	pgoff_t index;
2414 	unsigned from, to;
2415 	unsigned block_in_page;
2416 	unsigned block_start, block_end;
2417 	sector_t block_in_file;
2418 	int nr_reads = 0;
2419 	int ret = 0;
2420 	int is_mapped_to_disk = 1;
2421 
2422 	index = pos >> PAGE_CACHE_SHIFT;
2423 	from = pos & (PAGE_CACHE_SIZE - 1);
2424 	to = from + len;
2425 
2426 	page = grab_cache_page_write_begin(mapping, index, flags);
2427 	if (!page)
2428 		return -ENOMEM;
2429 	*pagep = page;
2430 	*fsdata = NULL;
2431 
2432 	if (page_has_buffers(page)) {
2433 		ret = __block_write_begin(page, pos, len, get_block);
2434 		if (unlikely(ret))
2435 			goto out_release;
2436 		return ret;
2437 	}
2438 
2439 	if (PageMappedToDisk(page))
2440 		return 0;
2441 
2442 	/*
2443 	 * Allocate buffers so that we can keep track of state, and potentially
2444 	 * attach them to the page if an error occurs. In the common case of
2445 	 * no error, they will just be freed again without ever being attached
2446 	 * to the page (which is all OK, because we're under the page lock).
2447 	 *
2448 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2449 	 * than the circular one we're used to.
2450 	 */
2451 	head = alloc_page_buffers(page, blocksize, 0);
2452 	if (!head) {
2453 		ret = -ENOMEM;
2454 		goto out_release;
2455 	}
2456 
2457 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2458 
2459 	/*
2460 	 * We loop across all blocks in the page, whether or not they are
2461 	 * part of the affected region.  This is so we can discover if the
2462 	 * page is fully mapped-to-disk.
2463 	 */
2464 	for (block_start = 0, block_in_page = 0, bh = head;
2465 		  block_start < PAGE_CACHE_SIZE;
2466 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2467 		int create;
2468 
2469 		block_end = block_start + blocksize;
2470 		bh->b_state = 0;
2471 		create = 1;
2472 		if (block_start >= to)
2473 			create = 0;
2474 		ret = get_block(inode, block_in_file + block_in_page,
2475 					bh, create);
2476 		if (ret)
2477 			goto failed;
2478 		if (!buffer_mapped(bh))
2479 			is_mapped_to_disk = 0;
2480 		if (buffer_new(bh))
2481 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2482 		if (PageUptodate(page)) {
2483 			set_buffer_uptodate(bh);
2484 			continue;
2485 		}
2486 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2487 			zero_user_segments(page, block_start, from,
2488 							to, block_end);
2489 			continue;
2490 		}
2491 		if (buffer_uptodate(bh))
2492 			continue;	/* reiserfs does this */
2493 		if (block_start < from || block_end > to) {
2494 			lock_buffer(bh);
2495 			bh->b_end_io = end_buffer_read_nobh;
2496 			submit_bh(READ, bh);
2497 			nr_reads++;
2498 		}
2499 	}
2500 
2501 	if (nr_reads) {
2502 		/*
2503 		 * The page is locked, so these buffers are protected from
2504 		 * any VM or truncate activity.  Hence we don't need to care
2505 		 * for the buffer_head refcounts.
2506 		 */
2507 		for (bh = head; bh; bh = bh->b_this_page) {
2508 			wait_on_buffer(bh);
2509 			if (!buffer_uptodate(bh))
2510 				ret = -EIO;
2511 		}
2512 		if (ret)
2513 			goto failed;
2514 	}
2515 
2516 	if (is_mapped_to_disk)
2517 		SetPageMappedToDisk(page);
2518 
2519 	*fsdata = head; /* to be released by nobh_write_end */
2520 
2521 	return 0;
2522 
2523 failed:
2524 	BUG_ON(!ret);
2525 	/*
2526 	 * Error recovery is a bit difficult. We need to zero out blocks that
2527 	 * were newly allocated, and dirty them to ensure they get written out.
2528 	 * Buffers need to be attached to the page at this point, otherwise
2529 	 * the handling of potential IO errors during writeout would be hard
2530 	 * (could try doing synchronous writeout, but what if that fails too?)
2531 	 */
2532 	attach_nobh_buffers(page, head);
2533 	page_zero_new_buffers(page, from, to);
2534 
2535 out_release:
2536 	unlock_page(page);
2537 	page_cache_release(page);
2538 	*pagep = NULL;
2539 
2540 	return ret;
2541 }
2542 EXPORT_SYMBOL(nobh_write_begin);
2543 
2544 int nobh_write_end(struct file *file, struct address_space *mapping,
2545 			loff_t pos, unsigned len, unsigned copied,
2546 			struct page *page, void *fsdata)
2547 {
2548 	struct inode *inode = page->mapping->host;
2549 	struct buffer_head *head = fsdata;
2550 	struct buffer_head *bh;
2551 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2552 
2553 	if (unlikely(copied < len) && head)
2554 		attach_nobh_buffers(page, head);
2555 	if (page_has_buffers(page))
2556 		return generic_write_end(file, mapping, pos, len,
2557 					copied, page, fsdata);
2558 
2559 	SetPageUptodate(page);
2560 	set_page_dirty(page);
2561 	if (pos+copied > inode->i_size) {
2562 		i_size_write(inode, pos+copied);
2563 		mark_inode_dirty(inode);
2564 	}
2565 
2566 	unlock_page(page);
2567 	page_cache_release(page);
2568 
2569 	while (head) {
2570 		bh = head;
2571 		head = head->b_this_page;
2572 		free_buffer_head(bh);
2573 	}
2574 
2575 	return copied;
2576 }
2577 EXPORT_SYMBOL(nobh_write_end);
2578 
2579 /*
2580  * nobh_writepage() - based on block_write_full_page() except
2581  * that it tries to operate without attaching bufferheads to
2582  * the page.
2583  */
2584 int nobh_writepage(struct page *page, get_block_t *get_block,
2585 			struct writeback_control *wbc)
2586 {
2587 	struct inode * const inode = page->mapping->host;
2588 	loff_t i_size = i_size_read(inode);
2589 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2590 	unsigned offset;
2591 	int ret;
2592 
2593 	/* Is the page fully inside i_size? */
2594 	if (page->index < end_index)
2595 		goto out;
2596 
2597 	/* Is the page fully outside i_size? (truncate in progress) */
2598 	offset = i_size & (PAGE_CACHE_SIZE-1);
2599 	if (page->index >= end_index+1 || !offset) {
2600 		/*
2601 		 * The page may have dirty, unmapped buffers.  For example,
2602 		 * they may have been added in ext3_writepage().  Make them
2603 		 * freeable here, so the page does not leak.
2604 		 */
2605 #if 0
2606 		/* Not really sure about this - do we need this? */
2607 		if (page->mapping->a_ops->invalidatepage)
2608 			page->mapping->a_ops->invalidatepage(page, offset);
2609 #endif
2610 		unlock_page(page);
2611 		return 0; /* don't care */
2612 	}
2613 
2614 	/*
2615 	 * The page straddles i_size.  It must be zeroed out on each and every
2616 	 * writepage invocation because it may be mmapped.  "A file is mapped
2617 	 * in multiples of the page size.  For a file that is not a multiple of
2618 	 * the  page size, the remaining memory is zeroed when mapped, and
2619  * the page size, the remaining memory is zeroed when mapped, and
2620 	 */
2621 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2622 out:
2623 	ret = mpage_writepage(page, get_block, wbc);
2624 	if (ret == -EAGAIN)
2625 		ret = __block_write_full_page(inode, page, get_block, wbc,
2626 					      end_buffer_async_write);
2627 	return ret;
2628 }
2629 EXPORT_SYMBOL(nobh_writepage);
2630 
2631 int nobh_truncate_page(struct address_space *mapping,
2632 			loff_t from, get_block_t *get_block)
2633 {
2634 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2635 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2636 	unsigned blocksize;
2637 	sector_t iblock;
2638 	unsigned length, pos;
2639 	struct inode *inode = mapping->host;
2640 	struct page *page;
2641 	struct buffer_head map_bh;
2642 	int err;
2643 
2644 	blocksize = 1 << inode->i_blkbits;
2645 	length = offset & (blocksize - 1);
2646 
2647 	/* Block boundary? Nothing to do */
2648 	if (!length)
2649 		return 0;
2650 
2651 	length = blocksize - length;
2652 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2653 
2654 	page = grab_cache_page(mapping, index);
2655 	err = -ENOMEM;
2656 	if (!page)
2657 		goto out;
2658 
2659 	if (page_has_buffers(page)) {
2660 has_buffers:
2661 		unlock_page(page);
2662 		page_cache_release(page);
2663 		return block_truncate_page(mapping, from, get_block);
2664 	}
2665 
2666 	/* Find the buffer that contains "offset" */
2667 	pos = blocksize;
2668 	while (offset >= pos) {
2669 		iblock++;
2670 		pos += blocksize;
2671 	}
2672 
2673 	map_bh.b_size = blocksize;
2674 	map_bh.b_state = 0;
2675 	err = get_block(inode, iblock, &map_bh, 0);
2676 	if (err)
2677 		goto unlock;
2678 	/* unmapped? It's a hole - nothing to do */
2679 	if (!buffer_mapped(&map_bh))
2680 		goto unlock;
2681 
2682 	/* Ok, it's mapped. Make sure it's up-to-date */
2683 	if (!PageUptodate(page)) {
2684 		err = mapping->a_ops->readpage(NULL, page);
2685 		if (err) {
2686 			page_cache_release(page);
2687 			goto out;
2688 		}
2689 		lock_page(page);
2690 		if (!PageUptodate(page)) {
2691 			err = -EIO;
2692 			goto unlock;
2693 		}
2694 		if (page_has_buffers(page))
2695 			goto has_buffers;
2696 	}
2697 	zero_user(page, offset, length);
2698 	set_page_dirty(page);
2699 	err = 0;
2700 
2701 unlock:
2702 	unlock_page(page);
2703 	page_cache_release(page);
2704 out:
2705 	return err;
2706 }
2707 EXPORT_SYMBOL(nobh_truncate_page);
2708 
2709 int block_truncate_page(struct address_space *mapping,
2710 			loff_t from, get_block_t *get_block)
2711 {
2712 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2713 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2714 	unsigned blocksize;
2715 	sector_t iblock;
2716 	unsigned length, pos;
2717 	struct inode *inode = mapping->host;
2718 	struct page *page;
2719 	struct buffer_head *bh;
2720 	int err;
2721 
2722 	blocksize = 1 << inode->i_blkbits;
2723 	length = offset & (blocksize - 1);
2724 
2725 	/* Block boundary? Nothing to do */
2726 	if (!length)
2727 		return 0;
2728 
2729 	length = blocksize - length;
2730 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2731 
2732 	page = grab_cache_page(mapping, index);
2733 	err = -ENOMEM;
2734 	if (!page)
2735 		goto out;
2736 
2737 	if (!page_has_buffers(page))
2738 		create_empty_buffers(page, blocksize, 0);
2739 
2740 	/* Find the buffer that contains "offset" */
2741 	bh = page_buffers(page);
2742 	pos = blocksize;
2743 	while (offset >= pos) {
2744 		bh = bh->b_this_page;
2745 		iblock++;
2746 		pos += blocksize;
2747 	}
2748 
2749 	err = 0;
2750 	if (!buffer_mapped(bh)) {
2751 		WARN_ON(bh->b_size != blocksize);
2752 		err = get_block(inode, iblock, bh, 0);
2753 		if (err)
2754 			goto unlock;
2755 		/* unmapped? It's a hole - nothing to do */
2756 		if (!buffer_mapped(bh))
2757 			goto unlock;
2758 	}
2759 
2760 	/* Ok, it's mapped. Make sure it's up-to-date */
2761 	if (PageUptodate(page))
2762 		set_buffer_uptodate(bh);
2763 
2764 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2765 		err = -EIO;
2766 		ll_rw_block(READ, 1, &bh);
2767 		wait_on_buffer(bh);
2768 		/* Uhhuh. Read error. Complain and punt. */
2769 		if (!buffer_uptodate(bh))
2770 			goto unlock;
2771 	}
2772 
2773 	zero_user(page, offset, length);
2774 	mark_buffer_dirty(bh);
2775 	err = 0;
2776 
2777 unlock:
2778 	unlock_page(page);
2779 	page_cache_release(page);
2780 out:
2781 	return err;
2782 }
2783 EXPORT_SYMBOL(block_truncate_page);
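
/*
 * Illustrative sketch, not part of the original source: a shrinking truncate
 * usually zeroes the tail of the last remaining block before changing the
 * size (helper names here are hypothetical):
 *
 *	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
 *	if (err)
 *		return err;
 *	truncate_setsize(inode, newsize);
 *	myfs_free_blocks_beyond(inode, newsize);
 */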
2784 
2785 /*
2786  * The generic ->writepage function for buffer-backed address_spaces
2787  * this form passes in the end_io handler used to finish the IO.
2788  */
2789 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2790 			struct writeback_control *wbc, bh_end_io_t *handler)
2791 {
2792 	struct inode * const inode = page->mapping->host;
2793 	loff_t i_size = i_size_read(inode);
2794 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2795 	unsigned offset;
2796 
2797 	/* Is the page fully inside i_size? */
2798 	if (page->index < end_index)
2799 		return __block_write_full_page(inode, page, get_block, wbc,
2800 					       handler);
2801 
2802 	/* Is the page fully outside i_size? (truncate in progress) */
2803 	offset = i_size & (PAGE_CACHE_SIZE-1);
2804 	if (page->index >= end_index+1 || !offset) {
2805 		/*
2806 		 * The page may have dirty, unmapped buffers.  For example,
2807 		 * they may have been added in ext3_writepage().  Make them
2808 		 * freeable here, so the page does not leak.
2809 		 */
2810 		do_invalidatepage(page, 0);
2811 		unlock_page(page);
2812 		return 0; /* don't care */
2813 	}
2814 
2815 	/*
2816 	 * The page straddles i_size.  It must be zeroed out on each and every
2817 	 * writepage invocation because it may be mmapped.  "A file is mapped
2818 	 * in multiples of the page size.  For a file that is not a multiple of
2819  * the page size, the remaining memory is zeroed when mapped, and
2820 	 * writes to that region are not written out to the file."
2821 	 */
2822 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2823 	return __block_write_full_page(inode, page, get_block, wbc, handler);
2824 }
2825 EXPORT_SYMBOL(block_write_full_page_endio);
2826 
2827 /*
2828  * The generic ->writepage function for buffer-backed address_spaces
2829  */
2830 int block_write_full_page(struct page *page, get_block_t *get_block,
2831 			struct writeback_control *wbc)
2832 {
2833 	return block_write_full_page_endio(page, get_block, wbc,
2834 					   end_buffer_async_write);
2835 }
2836 EXPORT_SYMBOL(block_write_full_page);
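
/*
 * Illustrative sketch, not part of the original source: the matching
 * ->writepage for the hypothetical filesystem used in the earlier examples:
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */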
2837 
2838 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2839 			    get_block_t *get_block)
2840 {
2841 	struct buffer_head tmp;
2842 	struct inode *inode = mapping->host;
2843 	tmp.b_state = 0;
2844 	tmp.b_blocknr = 0;
2845 	tmp.b_size = 1 << inode->i_blkbits;
2846 	get_block(inode, block, &tmp, 0);
2847 	return tmp.b_blocknr;
2848 }
2849 EXPORT_SYMBOL(generic_block_bmap);
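
/*
 * Illustrative sketch, not part of the original source: ->bmap is usually a
 * one-line wrapper around this helper (hypothetical names again):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */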
2850 
2851 static void end_bio_bh_io_sync(struct bio *bio, int err)
2852 {
2853 	struct buffer_head *bh = bio->bi_private;
2854 
2855 	if (err == -EOPNOTSUPP) {
2856 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2857 	}
2858 
2859 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2860 		set_bit(BH_Quiet, &bh->b_state);
2861 
2862 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2863 	bio_put(bio);
2864 }
2865 
2866 int submit_bh(int rw, struct buffer_head * bh)
2867 {
2868 	struct bio *bio;
2869 	int ret = 0;
2870 
2871 	BUG_ON(!buffer_locked(bh));
2872 	BUG_ON(!buffer_mapped(bh));
2873 	BUG_ON(!bh->b_end_io);
2874 	BUG_ON(buffer_delay(bh));
2875 	BUG_ON(buffer_unwritten(bh));
2876 
2877 	/*
2878 	 * Only clear out a write error when rewriting
2879 	 */
2880 	if (test_set_buffer_req(bh) && (rw & WRITE))
2881 		clear_buffer_write_io_error(bh);
2882 
2883 	/*
2884 	 * from here on down, it's all bio -- do the initial mapping,
2885 	 * submit_bio -> generic_make_request may further map this bio around
2886 	 */
2887 	bio = bio_alloc(GFP_NOIO, 1);
2888 
2889 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2890 	bio->bi_bdev = bh->b_bdev;
2891 	bio->bi_io_vec[0].bv_page = bh->b_page;
2892 	bio->bi_io_vec[0].bv_len = bh->b_size;
2893 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2894 
2895 	bio->bi_vcnt = 1;
2896 	bio->bi_idx = 0;
2897 	bio->bi_size = bh->b_size;
2898 
2899 	bio->bi_end_io = end_bio_bh_io_sync;
2900 	bio->bi_private = bh;
2901 
2902 	bio_get(bio);
2903 	submit_bio(rw, bio);
2904 
2905 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2906 		ret = -EOPNOTSUPP;
2907 
2908 	bio_put(bio);
2909 	return ret;
2910 }
2911 EXPORT_SYMBOL(submit_bh);
2912 
2913 /**
2914  * ll_rw_block: low-level access to block devices (DEPRECATED)
2915  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2916  * @nr: number of &struct buffer_heads in the array
2917  * @bhs: array of pointers to &struct buffer_head
2918  *
2919  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2920  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2921  * %READA option is described in the documentation for generic_make_request()
2922  * which ll_rw_block() calls.
2923  *
2924  * This function drops any buffer that it cannot get a lock on (with the
2925  * BH_Lock state bit), any buffer that appears to be clean when doing a write
2926  * request, and any buffer that appears to be up-to-date when doing a read
2927  * request.  Further, it marks as clean the buffers that are processed for
2928  * writing (the buffer cache won't assume that they are actually clean
2929  * until the buffer gets unlocked).
2930  *
2931  * ll_rw_block sets b_end_io to a simple completion handler that marks
2932  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2933  * any waiters.
2934  *
2935  * All of the buffers must be for the same device, and must also be a
2936  * multiple of the current approved size for the device.
2937  */
2938 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2939 {
2940 	int i;
2941 
2942 	for (i = 0; i < nr; i++) {
2943 		struct buffer_head *bh = bhs[i];
2944 
2945 		if (!trylock_buffer(bh))
2946 			continue;
2947 		if (rw == WRITE) {
2948 			if (test_clear_buffer_dirty(bh)) {
2949 				bh->b_end_io = end_buffer_write_sync;
2950 				get_bh(bh);
2951 				submit_bh(WRITE, bh);
2952 				continue;
2953 			}
2954 		} else {
2955 			if (!buffer_uptodate(bh)) {
2956 				bh->b_end_io = end_buffer_read_sync;
2957 				get_bh(bh);
2958 				submit_bh(rw, bh);
2959 				continue;
2960 			}
2961 		}
2962 		unlock_buffer(bh);
2963 	}
2964 }
2965 EXPORT_SYMBOL(ll_rw_block);
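
/*
 * Illustrative sketch, not part of the original source: because ll_rw_block()
 * silently skips buffers it cannot lock, it is best suited to read-ahead;
 * callers that actually need the data wait and re-check afterwards:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 */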
2966 
2967 void write_dirty_buffer(struct buffer_head *bh, int rw)
2968 {
2969 	lock_buffer(bh);
2970 	if (!test_clear_buffer_dirty(bh)) {
2971 		unlock_buffer(bh);
2972 		return;
2973 	}
2974 	bh->b_end_io = end_buffer_write_sync;
2975 	get_bh(bh);
2976 	submit_bh(rw, bh);
2977 }
2978 EXPORT_SYMBOL(write_dirty_buffer);
2979 
2980 /*
2981  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2982  * and then start new I/O and then wait upon it.  The caller must have a ref on
2983  * the buffer_head.
2984  */
2985 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
2986 {
2987 	int ret = 0;
2988 
2989 	WARN_ON(atomic_read(&bh->b_count) < 1);
2990 	lock_buffer(bh);
2991 	if (test_clear_buffer_dirty(bh)) {
2992 		get_bh(bh);
2993 		bh->b_end_io = end_buffer_write_sync;
2994 		ret = submit_bh(rw, bh);
2995 		wait_on_buffer(bh);
2996 		if (!ret && !buffer_uptodate(bh))
2997 			ret = -EIO;
2998 	} else {
2999 		unlock_buffer(bh);
3000 	}
3001 	return ret;
3002 }
3003 EXPORT_SYMBOL(__sync_dirty_buffer);
3004 
3005 int sync_dirty_buffer(struct buffer_head *bh)
3006 {
3007 	return __sync_dirty_buffer(bh, WRITE_SYNC);
3008 }
3009 EXPORT_SYMBOL(sync_dirty_buffer);
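
/*
 * Illustrative sketch, not part of the original source: a common metadata
 * update pattern is to dirty the buffer and force it out only when the
 * caller needs synchronous semantics ("sync" is a hypothetical flag):
 *
 *	memcpy(bh->b_data + offset, data, size);
 *	mark_buffer_dirty(bh);
 *	if (sync)
 *		err = sync_dirty_buffer(bh);
 */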
3010 
3011 /*
3012  * try_to_free_buffers() checks if all the buffers on this particular page
3013  * are unused, and releases them if so.
3014  *
3015  * Exclusion against try_to_free_buffers may be obtained by either
3016  * locking the page or by holding its mapping's private_lock.
3017  *
3018  * If the page is dirty but all the buffers are clean then we need to
3019  * be sure to mark the page clean as well.  This is because the page
3020  * may be against a block device, and a later reattachment of buffers
3021  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3022  * filesystem data on the same device.
3023  *
3024  * The same applies to regular filesystem pages: if all the buffers are
3025  * clean then we set the page clean and proceed.  To do that, we require
3026  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3027  * private_lock.
3028  *
3029  * try_to_free_buffers() is non-blocking.
3030  */
3031 static inline int buffer_busy(struct buffer_head *bh)
3032 {
3033 	return atomic_read(&bh->b_count) |
3034 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3035 }
3036 
3037 static int
3038 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3039 {
3040 	struct buffer_head *head = page_buffers(page);
3041 	struct buffer_head *bh;
3042 
3043 	bh = head;
3044 	do {
3045 		if (buffer_write_io_error(bh) && page->mapping)
3046 			set_bit(AS_EIO, &page->mapping->flags);
3047 		if (buffer_busy(bh))
3048 			goto failed;
3049 		bh = bh->b_this_page;
3050 	} while (bh != head);
3051 
3052 	do {
3053 		struct buffer_head *next = bh->b_this_page;
3054 
3055 		if (bh->b_assoc_map)
3056 			__remove_assoc_queue(bh);
3057 		bh = next;
3058 	} while (bh != head);
3059 	*buffers_to_free = head;
3060 	__clear_page_buffers(page);
3061 	return 1;
3062 failed:
3063 	return 0;
3064 }
3065 
3066 int try_to_free_buffers(struct page *page)
3067 {
3068 	struct address_space * const mapping = page->mapping;
3069 	struct buffer_head *buffers_to_free = NULL;
3070 	int ret = 0;
3071 
3072 	BUG_ON(!PageLocked(page));
3073 	if (PageWriteback(page))
3074 		return 0;
3075 
3076 	if (mapping == NULL) {		/* can this still happen? */
3077 		ret = drop_buffers(page, &buffers_to_free);
3078 		goto out;
3079 	}
3080 
3081 	spin_lock(&mapping->private_lock);
3082 	ret = drop_buffers(page, &buffers_to_free);
3083 
3084 	/*
3085 	 * If the filesystem writes its buffers by hand (eg ext3)
3086 	 * then we can have clean buffers against a dirty page.  We
3087 	 * clean the page here; otherwise the VM will never notice
3088 	 * that the filesystem did any IO at all.
3089 	 *
3090 	 * Also, during truncate, discard_buffer will have marked all
3091 	 * the page's buffers clean.  We discover that here and clean
3092 	 * the page also.
3093 	 *
3094 	 * private_lock must be held over this entire operation in order
3095 	 * to synchronise against __set_page_dirty_buffers and prevent the
3096 	 * dirty bit from being lost.
3097 	 */
3098 	if (ret)
3099 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3100 	spin_unlock(&mapping->private_lock);
3101 out:
3102 	if (buffers_to_free) {
3103 		struct buffer_head *bh = buffers_to_free;
3104 
3105 		do {
3106 			struct buffer_head *next = bh->b_this_page;
3107 			free_buffer_head(bh);
3108 			bh = next;
3109 		} while (bh != buffers_to_free);
3110 	}
3111 	return ret;
3112 }
3113 EXPORT_SYMBOL(try_to_free_buffers);
3114 
3115 /*
3116  * There are no bdflush tunables left.  But distributions are
3117  * still running obsolete flush daemons, so we terminate them here.
3118  *
3119  * Use of bdflush() is deprecated and will be removed in a future kernel.
3120  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3121  */
3122 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3123 {
3124 	static int msg_count;
3125 
3126 	if (!capable(CAP_SYS_ADMIN))
3127 		return -EPERM;
3128 
3129 	if (msg_count < 5) {
3130 		msg_count++;
3131 		printk(KERN_INFO
3132 			"warning: process `%s' used the obsolete bdflush"
3133 			" system call\n", current->comm);
3134 		printk(KERN_INFO "Fix your initscripts?\n");
3135 	}
3136 
3137 	if (func == 1)
3138 		do_exit(0);
3139 	return 0;
3140 }
3141 
3142 /*
3143  * Buffer-head allocation
3144  */
3145 static struct kmem_cache *bh_cachep __read_mostly;
3146 
3147 /*
3148  * Once the number of bh's in the machine exceeds this level, we start
3149  * stripping them in writeback.
3150  */
3151 static int max_buffer_heads;
3152 
3153 int buffer_heads_over_limit;
3154 
3155 struct bh_accounting {
3156 	int nr;			/* Number of live bh's */
3157 	int ratelimit;		/* Limit cacheline bouncing */
3158 };
3159 
3160 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3161 
3162 static void recalc_bh_state(void)
3163 {
3164 	int i;
3165 	int tot = 0;
3166 
3167 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3168 		return;
3169 	__this_cpu_write(bh_accounting.ratelimit, 0);
3170 	for_each_online_cpu(i)
3171 		tot += per_cpu(bh_accounting, i).nr;
3172 	buffer_heads_over_limit = (tot > max_buffer_heads);
3173 }
3174 
3175 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3176 {
3177 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3178 	if (ret) {
3179 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3180 		preempt_disable();
3181 		__this_cpu_inc(bh_accounting.nr);
3182 		recalc_bh_state();
3183 		preempt_enable();
3184 	}
3185 	return ret;
3186 }
3187 EXPORT_SYMBOL(alloc_buffer_head);
3188 
3189 void free_buffer_head(struct buffer_head *bh)
3190 {
3191 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3192 	kmem_cache_free(bh_cachep, bh);
3193 	preempt_disable();
3194 	__this_cpu_dec(bh_accounting.nr);
3195 	recalc_bh_state();
3196 	preempt_enable();
3197 }
3198 EXPORT_SYMBOL(free_buffer_head);
3199 
3200 static void buffer_exit_cpu(int cpu)
3201 {
3202 	int i;
3203 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3204 
3205 	for (i = 0; i < BH_LRU_SIZE; i++) {
3206 		brelse(b->bhs[i]);
3207 		b->bhs[i] = NULL;
3208 	}
3209 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3210 	per_cpu(bh_accounting, cpu).nr = 0;
3211 }
3212 
3213 static int buffer_cpu_notify(struct notifier_block *self,
3214 			      unsigned long action, void *hcpu)
3215 {
3216 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3217 		buffer_exit_cpu((unsigned long)hcpu);
3218 	return NOTIFY_OK;
3219 }
3220 
3221 /**
3222  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3223  * @bh: struct buffer_head
3224  *
3225  * Return true if the buffer is up-to-date and false,
3226  * with the buffer locked, if not.
3227  */
3228 int bh_uptodate_or_lock(struct buffer_head *bh)
3229 {
3230 	if (!buffer_uptodate(bh)) {
3231 		lock_buffer(bh);
3232 		if (!buffer_uptodate(bh))
3233 			return 0;
3234 		unlock_buffer(bh);
3235 	}
3236 	return 1;
3237 }
3238 EXPORT_SYMBOL(bh_uptodate_or_lock);
3239 
3240 /**
3241  * bh_submit_read - Submit a locked buffer for reading
3242  * @bh: struct buffer_head
3243  *
3244  * Returns zero on success and -EIO on error.
3245  */
3246 int bh_submit_read(struct buffer_head *bh)
3247 {
3248 	BUG_ON(!buffer_locked(bh));
3249 
3250 	if (buffer_uptodate(bh)) {
3251 		unlock_buffer(bh);
3252 		return 0;
3253 	}
3254 
3255 	get_bh(bh);
3256 	bh->b_end_io = end_buffer_read_sync;
3257 	submit_bh(READ, bh);
3258 	wait_on_buffer(bh);
3259 	if (buffer_uptodate(bh))
3260 		return 0;
3261 	return -EIO;
3262 }
3263 EXPORT_SYMBOL(bh_submit_read);
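
/*
 * Illustrative sketch, not part of the original source: bh_uptodate_or_lock()
 * and bh_submit_read() are meant to be used together so the block is read at
 * most once:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);
 *		if (err)
 *			return err;
 *	}
 *
 * On return the buffer is up-to-date and unlocked; bh_submit_read() consumes
 * the lock taken by bh_uptodate_or_lock().
 */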
3264 
3265 void __init buffer_init(void)
3266 {
3267 	int nrpages;
3268 
3269 	bh_cachep = kmem_cache_create("buffer_head",
3270 			sizeof(struct buffer_head), 0,
3271 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3272 				SLAB_MEM_SPREAD),
3273 				NULL);
3274 
3275 	/*
3276 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3277 	 */
3278 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3279 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3280 	hotcpu_notifier(buffer_cpu_notify, 0);
3281 }
3282