xref: /openbmc/linux/fs/buffer.c (revision 840ef8b7cc584a23c4f9d05352f4dbaf8e56e5ab)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  *  Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/export.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 #include <trace/events/block.h>
45 
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
49 
50 void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 EXPORT_SYMBOL(init_buffer);
56 
57 inline void touch_buffer(struct buffer_head *bh)
58 {
59 	trace_block_touch_buffer(bh);
60 	mark_page_accessed(bh->b_page);
61 }
62 EXPORT_SYMBOL(touch_buffer);
63 
64 static int sleep_on_buffer(void *word)
65 {
66 	io_schedule();
67 	return 0;
68 }
69 
70 void __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
77 void unlock_buffer(struct buffer_head *bh)
78 {
79 	clear_bit_unlock(BH_Lock, &bh->b_state);
80 	smp_mb__after_clear_bit();
81 	wake_up_bit(&bh->b_state, BH_Lock);
82 }
83 EXPORT_SYMBOL(unlock_buffer);
84 
85 /*
86  * Block until a buffer comes unlocked.  This doesn't stop it
87  * from becoming locked again - you have to lock it yourself
88  * if you want to preserve its state.
89  */
90 void __wait_on_buffer(struct buffer_head * bh)
91 {
92 	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
93 }
94 EXPORT_SYMBOL(__wait_on_buffer);
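
/*
 * A minimal usage sketch (illustrative only): after waiting, re-check the
 * buffer state yourself, since nothing stops the buffer from being locked
 * or dirtied again between the wakeup and your next access:
 *
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 *
 * If the state must not change underneath you, take the buffer lock instead:
 *
 *	lock_buffer(bh);
 *	... inspect or modify the buffer ...
 *	unlock_buffer(bh);
 */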
95 
96 static void
97 __clear_page_buffers(struct page *page)
98 {
99 	ClearPagePrivate(page);
100 	set_page_private(page, 0);
101 	page_cache_release(page);
102 }
103 
104 
105 static int quiet_error(struct buffer_head *bh)
106 {
107 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
108 		return 0;
109 	return 1;
110 }
111 
112 
113 static void buffer_io_error(struct buffer_head *bh)
114 {
115 	char b[BDEVNAME_SIZE];
116 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
117 			bdevname(bh->b_bdev, b),
118 			(unsigned long long)bh->b_blocknr);
119 }
120 
121 /*
122  * End-of-IO handler helper function which does not touch the bh after
123  * unlocking it.
124  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
125  * a race there is benign: unlock_buffer() only uses the bh's address for
126  * hashing after unlocking the buffer, so it doesn't actually touch the bh
127  * itself.
128  */
129 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
130 {
131 	if (uptodate) {
132 		set_buffer_uptodate(bh);
133 	} else {
134 		/* This happens due to failed READA attempts. */
135 		clear_buffer_uptodate(bh);
136 	}
137 	unlock_buffer(bh);
138 }
139 
140 /*
141  * Default synchronous end-of-IO handler.  Just mark it up-to-date and
142  * unlock the buffer. This is what ll_rw_block uses too.
143  */
144 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
145 {
146 	__end_buffer_read_notouch(bh, uptodate);
147 	put_bh(bh);
148 }
149 EXPORT_SYMBOL(end_buffer_read_sync);
150 
151 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
152 {
153 	char b[BDEVNAME_SIZE];
154 
155 	if (uptodate) {
156 		set_buffer_uptodate(bh);
157 	} else {
158 		if (!quiet_error(bh)) {
159 			buffer_io_error(bh);
160 			printk(KERN_WARNING "lost page write due to "
161 					"I/O error on %s\n",
162 				       bdevname(bh->b_bdev, b));
163 		}
164 		set_buffer_write_io_error(bh);
165 		clear_buffer_uptodate(bh);
166 	}
167 	unlock_buffer(bh);
168 	put_bh(bh);
169 }
170 EXPORT_SYMBOL(end_buffer_write_sync);
171 
172 /*
173  * Various filesystems appear to want __find_get_block to be non-blocking.
174  * But it's the page lock which protects the buffers.  To get around this,
175  * we get exclusion from try_to_free_buffers with the blockdev mapping's
176  * private_lock.
177  *
178  * Hack idea: for the blockdev mapping, private_lock contention
179  * may be quite high.  This code could TryLock the page, and if that
180  * succeeds, there is no need to take private_lock. (But if
181  * private_lock is contended then so is mapping->tree_lock).
182  */
183 static struct buffer_head *
184 __find_get_block_slow(struct block_device *bdev, sector_t block)
185 {
186 	struct inode *bd_inode = bdev->bd_inode;
187 	struct address_space *bd_mapping = bd_inode->i_mapping;
188 	struct buffer_head *ret = NULL;
189 	pgoff_t index;
190 	struct buffer_head *bh;
191 	struct buffer_head *head;
192 	struct page *page;
193 	int all_mapped = 1;
194 
195 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
196 	page = find_get_page(bd_mapping, index);
197 	if (!page)
198 		goto out;
199 
200 	spin_lock(&bd_mapping->private_lock);
201 	if (!page_has_buffers(page))
202 		goto out_unlock;
203 	head = page_buffers(page);
204 	bh = head;
205 	do {
206 		if (!buffer_mapped(bh))
207 			all_mapped = 0;
208 		else if (bh->b_blocknr == block) {
209 			ret = bh;
210 			get_bh(bh);
211 			goto out_unlock;
212 		}
213 		bh = bh->b_this_page;
214 	} while (bh != head);
215 
216 	/* We might be here because some of the buffers on this page are
217 	 * not mapped.  This is due to various races between
218 	 * file I/O on the block device and getblk().  It gets dealt with
219 	 * elsewhere; don't report an error if we had some unmapped buffers.
220 	 */
221 	if (all_mapped) {
222 		char b[BDEVNAME_SIZE];
223 
224 		printk("__find_get_block_slow() failed. "
225 			"block=%llu, b_blocknr=%llu\n",
226 			(unsigned long long)block,
227 			(unsigned long long)bh->b_blocknr);
228 		printk("b_state=0x%08lx, b_size=%zu\n",
229 			bh->b_state, bh->b_size);
230 		printk("device %s blocksize: %d\n", bdevname(bdev, b),
231 			1 << bd_inode->i_blkbits);
232 	}
233 out_unlock:
234 	spin_unlock(&bd_mapping->private_lock);
235 	page_cache_release(page);
236 out:
237 	return ret;
238 }
239 
240 /*
241  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
242  */
243 static void free_more_memory(void)
244 {
245 	struct zone *zone;
246 	int nid;
247 
248 	wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
249 	yield();
250 
251 	for_each_online_node(nid) {
252 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
253 						gfp_zone(GFP_NOFS), NULL,
254 						&zone);
255 		if (zone)
256 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
257 						GFP_NOFS, NULL);
258 	}
259 }
260 
261 /*
262  * I/O completion handler for block_read_full_page() - pages
263  * which come unlocked at the end of I/O.
264  */
265 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
266 {
267 	unsigned long flags;
268 	struct buffer_head *first;
269 	struct buffer_head *tmp;
270 	struct page *page;
271 	int page_uptodate = 1;
272 
273 	BUG_ON(!buffer_async_read(bh));
274 
275 	page = bh->b_page;
276 	if (uptodate) {
277 		set_buffer_uptodate(bh);
278 	} else {
279 		clear_buffer_uptodate(bh);
280 		if (!quiet_error(bh))
281 			buffer_io_error(bh);
282 		SetPageError(page);
283 	}
284 
285 	/*
286 	 * Be _very_ careful from here on. Bad things can happen if
287 	 * two buffer heads end IO at almost the same time and both
288 	 * decide that the page is now completely done.
289 	 */
290 	first = page_buffers(page);
291 	local_irq_save(flags);
292 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
293 	clear_buffer_async_read(bh);
294 	unlock_buffer(bh);
295 	tmp = bh;
296 	do {
297 		if (!buffer_uptodate(tmp))
298 			page_uptodate = 0;
299 		if (buffer_async_read(tmp)) {
300 			BUG_ON(!buffer_locked(tmp));
301 			goto still_busy;
302 		}
303 		tmp = tmp->b_this_page;
304 	} while (tmp != bh);
305 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
306 	local_irq_restore(flags);
307 
308 	/*
309 	 * If none of the buffers had errors and they are all
310 	 * uptodate then we can set the page uptodate.
311 	 */
312 	if (page_uptodate && !PageError(page))
313 		SetPageUptodate(page);
314 	unlock_page(page);
315 	return;
316 
317 still_busy:
318 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
319 	local_irq_restore(flags);
320 	return;
321 }
322 
323 /*
324  * Completion handler for block_write_full_page() - pages which are unlocked
325  * during I/O, and which have PageWriteback cleared upon I/O completion.
326  */
327 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
328 {
329 	char b[BDEVNAME_SIZE];
330 	unsigned long flags;
331 	struct buffer_head *first;
332 	struct buffer_head *tmp;
333 	struct page *page;
334 
335 	BUG_ON(!buffer_async_write(bh));
336 
337 	page = bh->b_page;
338 	if (uptodate) {
339 		set_buffer_uptodate(bh);
340 	} else {
341 		if (!quiet_error(bh)) {
342 			buffer_io_error(bh);
343 			printk(KERN_WARNING "lost page write due to "
344 					"I/O error on %s\n",
345 			       bdevname(bh->b_bdev, b));
346 		}
347 		set_bit(AS_EIO, &page->mapping->flags);
348 		set_buffer_write_io_error(bh);
349 		clear_buffer_uptodate(bh);
350 		SetPageError(page);
351 	}
352 
353 	first = page_buffers(page);
354 	local_irq_save(flags);
355 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
356 
357 	clear_buffer_async_write(bh);
358 	unlock_buffer(bh);
359 	tmp = bh->b_this_page;
360 	while (tmp != bh) {
361 		if (buffer_async_write(tmp)) {
362 			BUG_ON(!buffer_locked(tmp));
363 			goto still_busy;
364 		}
365 		tmp = tmp->b_this_page;
366 	}
367 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
368 	local_irq_restore(flags);
369 	end_page_writeback(page);
370 	return;
371 
372 still_busy:
373 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
374 	local_irq_restore(flags);
375 	return;
376 }
377 EXPORT_SYMBOL(end_buffer_async_write);
378 
379 /*
380  * If a page's buffers are under async read-in (end_buffer_async_read
381  * completion) then there is a possibility that another thread of
382  * control could lock one of the buffers after it has completed
383  * but while some of the other buffers have not completed.  This
384  * locked buffer would confuse end_buffer_async_read() into not unlocking
385  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
386  * that this buffer is not under async I/O.
387  *
388  * The page comes unlocked when it has no locked buffer_async buffers
389  * left.
390  *
391  * PageLocked prevents anyone from starting new async I/O against any
392  * of the buffers.
393  *
394  * PageWriteback is used to prevent simultaneous writeout of the same
395  * page.
396  *
397  * PageLocked prevents anyone from starting writeback of a page which is
398  * under read I/O (PageWriteback is only ever set against a locked page).
399  */
400 static void mark_buffer_async_read(struct buffer_head *bh)
401 {
402 	bh->b_end_io = end_buffer_async_read;
403 	set_buffer_async_read(bh);
404 }
405 
406 static void mark_buffer_async_write_endio(struct buffer_head *bh,
407 					  bh_end_io_t *handler)
408 {
409 	bh->b_end_io = handler;
410 	set_buffer_async_write(bh);
411 }
412 
413 void mark_buffer_async_write(struct buffer_head *bh)
414 {
415 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
416 }
417 EXPORT_SYMBOL(mark_buffer_async_write);
418 
419 
420 /*
421  * fs/buffer.c contains helper functions for buffer-backed address space's
422  * fsync functions.  A common requirement for buffer-based filesystems is
423  * that certain data from the backing blockdev needs to be written out for
424  * a successful fsync().  For example, ext2 indirect blocks need to be
425  * written back and waited upon before fsync() returns.
426  *
427  * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
428  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
429  * management of a list of dependent buffers at ->i_mapping->private_list.
430  *
431  * Locking is a little subtle: try_to_free_buffers() will remove buffers
432  * from their controlling inode's queue when they are being freed.  But
433  * try_to_free_buffers() will be operating against the *blockdev* mapping
434  * at the time, not against the S_ISREG file which depends on those buffers.
435  * So the locking for private_list is via the private_lock in the address_space
436  * which backs the buffers.  Which is different from the address_space
437  * against which the buffers are listed.  So for a particular address_space,
438  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
439  * mapping->private_list will always be protected by the backing blockdev's
440  * ->private_lock.
441  *
442  * Which introduces a requirement: all buffers on an address_space's
443  * ->private_list must be from the same address_space: the blockdev's.
444  *
445  * address_spaces which do not place buffers at ->private_list via these
446  * utility functions are free to use private_lock and private_list for
447  * whatever they want.  The only requirement is that list_empty(private_list)
448  * be true at clear_inode() time.
449  *
450  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
451  * filesystems should do that.  invalidate_inode_buffers() should just go
452  * BUG_ON(!list_empty).
453  *
454  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
455  * take an address_space, not an inode.  And it should be called
456  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
457  * queued up.
458  *
459  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
460  * list if it is already on a list.  Because if the buffer is on a list,
461  * it *must* already be on the right one.  If not, the filesystem is being
462  * silly.  This will save a ton of locking.  But first we have to ensure
463  * that buffers are taken *off* the old inode's list when they are freed
464  * (presumably in truncate).  That requires careful auditing of all
465  * filesystems (do it inside bforget()).  It could also be done by bringing
466  * b_inode back.
467  */
468 
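/*
 * A minimal sketch of how a filesystem might use these helpers (illustrative
 * only; the "indirect_block_nr" name is made up).  While building metadata
 * for a regular file, tie the blockdev buffer to the file's mapping so a
 * later fsync() knows it must be written out:
 *
 *	bh = sb_bread(sb, indirect_block_nr);
 *	... modify bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 *
 * and then, in the filesystem's ->fsync(), write and wait upon everything
 * that was queued on ->private_list:
 *
 *	err = sync_mapping_buffers(inode->i_mapping);
 */
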
469 /*
470  * The buffer's backing address_space's private_lock must be held
471  */
472 static void __remove_assoc_queue(struct buffer_head *bh)
473 {
474 	list_del_init(&bh->b_assoc_buffers);
475 	WARN_ON(!bh->b_assoc_map);
476 	if (buffer_write_io_error(bh))
477 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
478 	bh->b_assoc_map = NULL;
479 }
480 
481 int inode_has_buffers(struct inode *inode)
482 {
483 	return !list_empty(&inode->i_data.private_list);
484 }
485 
486 /*
487  * osync is designed to support O_SYNC I/O.  It waits synchronously for
488  * all already-submitted IO to complete, but does not queue any new
489  * writes to the disk.
490  *
491  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
492  * you dirty the buffers, and then use osync_inode_buffers to wait for
493  * completion.  Any other dirty buffers which are not yet queued for
494  * write will not be flushed to disk by the osync.
495  */
496 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
497 {
498 	struct buffer_head *bh;
499 	struct list_head *p;
500 	int err = 0;
501 
502 	spin_lock(lock);
503 repeat:
504 	list_for_each_prev(p, list) {
505 		bh = BH_ENTRY(p);
506 		if (buffer_locked(bh)) {
507 			get_bh(bh);
508 			spin_unlock(lock);
509 			wait_on_buffer(bh);
510 			if (!buffer_uptodate(bh))
511 				err = -EIO;
512 			brelse(bh);
513 			spin_lock(lock);
514 			goto repeat;
515 		}
516 	}
517 	spin_unlock(lock);
518 	return err;
519 }
520 
521 static void do_thaw_one(struct super_block *sb, void *unused)
522 {
523 	char b[BDEVNAME_SIZE];
524 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
525 		printk(KERN_WARNING "Emergency Thaw on %s\n",
526 		       bdevname(sb->s_bdev, b));
527 }
528 
529 static void do_thaw_all(struct work_struct *work)
530 {
531 	iterate_supers(do_thaw_one, NULL);
532 	kfree(work);
533 	printk(KERN_WARNING "Emergency Thaw complete\n");
534 }
535 
536 /**
537  * emergency_thaw_all -- forcibly thaw every frozen filesystem
538  *
539  * Used for emergency unfreeze of all filesystems via SysRq
540  */
541 void emergency_thaw_all(void)
542 {
543 	struct work_struct *work;
544 
545 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
546 	if (work) {
547 		INIT_WORK(work, do_thaw_all);
548 		schedule_work(work);
549 	}
550 }
551 
552 /**
553  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
554  * @mapping: the mapping which wants those buffers written
555  *
556  * Starts I/O against the buffers at mapping->private_list, and waits upon
557  * that I/O.
558  *
559  * Basically, this is a convenience function for fsync().
560  * @mapping is a file or directory which needs those buffers to be written for
561  * a successful fsync().
562  */
563 int sync_mapping_buffers(struct address_space *mapping)
564 {
565 	struct address_space *buffer_mapping = mapping->private_data;
566 
567 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
568 		return 0;
569 
570 	return fsync_buffers_list(&buffer_mapping->private_lock,
571 					&mapping->private_list);
572 }
573 EXPORT_SYMBOL(sync_mapping_buffers);
574 
575 /*
576  * Called when we've recently written block `bblock', and it is known that
577  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
578  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
579  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
580  */
581 void write_boundary_block(struct block_device *bdev,
582 			sector_t bblock, unsigned blocksize)
583 {
584 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
585 	if (bh) {
586 		if (buffer_dirty(bh))
587 			ll_rw_block(WRITE, 1, &bh);
588 		put_bh(bh);
589 	}
590 }
591 
592 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
593 {
594 	struct address_space *mapping = inode->i_mapping;
595 	struct address_space *buffer_mapping = bh->b_page->mapping;
596 
597 	mark_buffer_dirty(bh);
598 	if (!mapping->private_data) {
599 		mapping->private_data = buffer_mapping;
600 	} else {
601 		BUG_ON(mapping->private_data != buffer_mapping);
602 	}
603 	if (!bh->b_assoc_map) {
604 		spin_lock(&buffer_mapping->private_lock);
605 		list_move_tail(&bh->b_assoc_buffers,
606 				&mapping->private_list);
607 		bh->b_assoc_map = mapping;
608 		spin_unlock(&buffer_mapping->private_lock);
609 	}
610 }
611 EXPORT_SYMBOL(mark_buffer_dirty_inode);
612 
613 /*
614  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
615  * dirty.
616  *
617  * If warn is true, then emit a warning if the page is not uptodate and has
618  * not been truncated.
619  */
620 static void __set_page_dirty(struct page *page,
621 		struct address_space *mapping, int warn)
622 {
623 	spin_lock_irq(&mapping->tree_lock);
624 	if (page->mapping) {	/* Race with truncate? */
625 		WARN_ON_ONCE(warn && !PageUptodate(page));
626 		account_page_dirtied(page, mapping);
627 		radix_tree_tag_set(&mapping->page_tree,
628 				page_index(page), PAGECACHE_TAG_DIRTY);
629 	}
630 	spin_unlock_irq(&mapping->tree_lock);
631 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
632 }
633 
634 /*
635  * Add a page to the dirty page list.
636  *
637  * It is a sad fact of life that this function is called from several places
638  * deeply under spinlocking.  It may not sleep.
639  *
640  * If the page has buffers, the uptodate buffers are set dirty, to preserve
641  * dirty-state coherency between the page and the buffers.  If the page does
642  * not have buffers then when they are later attached they will all be set
643  * dirty.
644  *
645  * The buffers are dirtied before the page is dirtied.  There's a small race
646  * window in which a writepage caller may see the page cleanness but not the
647  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
648  * before the buffers, a concurrent writepage caller could clear the page dirty
649  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
650  * page on the dirty page list.
651  *
652  * We use private_lock to lock against try_to_free_buffers while using the
653  * page's buffer list.  Also use this to protect against clean buffers being
654  * added to the page after it was set dirty.
655  *
656  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
657  * address_space though.
658  */
659 int __set_page_dirty_buffers(struct page *page)
660 {
661 	int newly_dirty;
662 	struct address_space *mapping = page_mapping(page);
663 
664 	if (unlikely(!mapping))
665 		return !TestSetPageDirty(page);
666 
667 	spin_lock(&mapping->private_lock);
668 	if (page_has_buffers(page)) {
669 		struct buffer_head *head = page_buffers(page);
670 		struct buffer_head *bh = head;
671 
672 		do {
673 			set_buffer_dirty(bh);
674 			bh = bh->b_this_page;
675 		} while (bh != head);
676 	}
677 	newly_dirty = !TestSetPageDirty(page);
678 	spin_unlock(&mapping->private_lock);
679 
680 	if (newly_dirty)
681 		__set_page_dirty(page, mapping, 1);
682 	return newly_dirty;
683 }
684 EXPORT_SYMBOL(__set_page_dirty_buffers);
685 
686 /*
687  * Write out and wait upon a list of buffers.
688  *
689  * We have conflicting pressures: we want to make sure that all
690  * initially dirty buffers get waited on, but that any subsequently
691  * dirtied buffers don't.  After all, we don't want fsync to last
692  * forever if somebody is actively writing to the file.
693  *
694  * Do this in two main stages: first we copy dirty buffers to a
695  * temporary inode list, queueing the writes as we go.  Then we clean
696  * up, waiting for those writes to complete.
697  *
698  * During this second stage, any subsequent updates to the file may end
699  * up refiling the buffer on the original inode's dirty list again, so
700  * there is a chance we will end up with a buffer queued for write but
701  * not yet completed on that list.  So, as a final cleanup we go through
702  * the osync code to catch these locked, dirty buffers without requeuing
703  * any newly dirty buffers for write.
704  */
705 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
706 {
707 	struct buffer_head *bh;
708 	struct list_head tmp;
709 	struct address_space *mapping;
710 	int err = 0, err2;
711 	struct blk_plug plug;
712 
713 	INIT_LIST_HEAD(&tmp);
714 	blk_start_plug(&plug);
715 
716 	spin_lock(lock);
717 	while (!list_empty(list)) {
718 		bh = BH_ENTRY(list->next);
719 		mapping = bh->b_assoc_map;
720 		__remove_assoc_queue(bh);
721 		/* Avoid race with mark_buffer_dirty_inode() which does
722 		 * a lockless check and we rely on seeing the dirty bit */
723 		smp_mb();
724 		if (buffer_dirty(bh) || buffer_locked(bh)) {
725 			list_add(&bh->b_assoc_buffers, &tmp);
726 			bh->b_assoc_map = mapping;
727 			if (buffer_dirty(bh)) {
728 				get_bh(bh);
729 				spin_unlock(lock);
730 				/*
731 				 * Ensure any pending I/O completes so that
732 				 * write_dirty_buffer() actually writes the
733 				 * current contents - it is a noop if I/O is
734 				 * still in flight on potentially older
735 				 * contents.
736 				 */
737 				write_dirty_buffer(bh, WRITE_SYNC);
738 
739 				/*
740 				 * Kick off IO for the previous mapping. Note
741 				 * that we will not run the very last mapping,
742 				 * wait_on_buffer() will do that for us
743 				 * through sync_buffer().
744 				 * through sleep_on_buffer().
745 				brelse(bh);
746 				spin_lock(lock);
747 			}
748 		}
749 	}
750 
751 	spin_unlock(lock);
752 	blk_finish_plug(&plug);
753 	spin_lock(lock);
754 
755 	while (!list_empty(&tmp)) {
756 		bh = BH_ENTRY(tmp.prev);
757 		get_bh(bh);
758 		mapping = bh->b_assoc_map;
759 		__remove_assoc_queue(bh);
760 		/* Avoid race with mark_buffer_dirty_inode() which does
761 		 * a lockless check and we rely on seeing the dirty bit */
762 		smp_mb();
763 		if (buffer_dirty(bh)) {
764 			list_add(&bh->b_assoc_buffers,
765 				 &mapping->private_list);
766 			bh->b_assoc_map = mapping;
767 		}
768 		spin_unlock(lock);
769 		wait_on_buffer(bh);
770 		if (!buffer_uptodate(bh))
771 			err = -EIO;
772 		brelse(bh);
773 		spin_lock(lock);
774 	}
775 
776 	spin_unlock(lock);
777 	err2 = osync_buffers_list(lock, list);
778 	if (err)
779 		return err;
780 	else
781 		return err2;
782 }
783 
784 /*
785  * Invalidate any and all dirty buffers on a given inode.  We are
786  * probably unmounting the fs, but that doesn't mean we have already
787  * done a sync().  Just drop the buffers from the inode list.
788  *
789  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
790  * assumes that all the buffers are against the blockdev.  Not true
791  * for reiserfs.
792  */
793 void invalidate_inode_buffers(struct inode *inode)
794 {
795 	if (inode_has_buffers(inode)) {
796 		struct address_space *mapping = &inode->i_data;
797 		struct list_head *list = &mapping->private_list;
798 		struct address_space *buffer_mapping = mapping->private_data;
799 
800 		spin_lock(&buffer_mapping->private_lock);
801 		while (!list_empty(list))
802 			__remove_assoc_queue(BH_ENTRY(list->next));
803 		spin_unlock(&buffer_mapping->private_lock);
804 	}
805 }
806 EXPORT_SYMBOL(invalidate_inode_buffers);
807 
808 /*
809  * Remove any clean buffers from the inode's buffer list.  This is called
810  * when we're trying to free the inode itself.  Those buffers can pin it.
811  *
812  * Returns true if all buffers were removed.
813  */
814 int remove_inode_buffers(struct inode *inode)
815 {
816 	int ret = 1;
817 
818 	if (inode_has_buffers(inode)) {
819 		struct address_space *mapping = &inode->i_data;
820 		struct list_head *list = &mapping->private_list;
821 		struct address_space *buffer_mapping = mapping->private_data;
822 
823 		spin_lock(&buffer_mapping->private_lock);
824 		while (!list_empty(list)) {
825 			struct buffer_head *bh = BH_ENTRY(list->next);
826 			if (buffer_dirty(bh)) {
827 				ret = 0;
828 				break;
829 			}
830 			__remove_assoc_queue(bh);
831 		}
832 		spin_unlock(&buffer_mapping->private_lock);
833 	}
834 	return ret;
835 }
836 
837 /*
838  * Create the appropriate buffers when given a page for data area and
839  * the size of each buffer.  Use the bh->b_this_page linked list to
840  * follow the buffers created.  Return NULL if unable to create more
841  * buffers.
842  *
843  * The retry flag is used to differentiate async IO (paging, swapping)
844  * which must not fail, from ordinary buffer allocations.
845  */
846 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
847 		int retry)
848 {
849 	struct buffer_head *bh, *head;
850 	long offset;
851 
852 try_again:
853 	head = NULL;
854 	offset = PAGE_SIZE;
855 	while ((offset -= size) >= 0) {
856 		bh = alloc_buffer_head(GFP_NOFS);
857 		if (!bh)
858 			goto no_grow;
859 
860 		bh->b_this_page = head;
861 		bh->b_blocknr = -1;
862 		head = bh;
863 
864 		bh->b_size = size;
865 
866 		/* Link the buffer to its page */
867 		set_bh_page(bh, page, offset);
868 
869 		init_buffer(bh, NULL, NULL);
870 	}
871 	return head;
872 /*
873  * In case anything failed, we just free everything we got.
874  */
875 no_grow:
876 	if (head) {
877 		do {
878 			bh = head;
879 			head = head->b_this_page;
880 			free_buffer_head(bh);
881 		} while (head);
882 	}
883 
884 	/*
885 	 * Return failure for non-async IO requests.  Async IO requests
886 	 * are not allowed to fail, so we have to wait until buffer heads
887 	 * become available.  But we don't want tasks sleeping with
888 	 * partially complete buffers, so all were released above.
889 	 */
890 	if (!retry)
891 		return NULL;
892 
893 	/* We're _really_ low on memory. Now we just
894 	 * wait for old buffer heads to become free due to
895 	 * finishing IO.  Since this is an async request and
896 	 * the reserve list is empty, we're sure there are
897 	 * async buffer heads in use.
898 	 */
899 	free_more_memory();
900 	goto try_again;
901 }
902 EXPORT_SYMBOL_GPL(alloc_page_buffers);
903 
904 static inline void
905 link_dev_buffers(struct page *page, struct buffer_head *head)
906 {
907 	struct buffer_head *bh, *tail;
908 
909 	bh = head;
910 	do {
911 		tail = bh;
912 		bh = bh->b_this_page;
913 	} while (bh);
914 	tail->b_this_page = head;
915 	attach_page_buffers(page, head);
916 }
917 
918 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
919 {
920 	sector_t retval = ~((sector_t)0);
921 	loff_t sz = i_size_read(bdev->bd_inode);
922 
923 	if (sz) {
924 		unsigned int sizebits = blksize_bits(size);
925 		retval = (sz >> sizebits);
926 	}
927 	return retval;
928 }
929 
930 /*
931  * Initialise the state of a blockdev page's buffers.
932  */
933 static sector_t
934 init_page_buffers(struct page *page, struct block_device *bdev,
935 			sector_t block, int size)
936 {
937 	struct buffer_head *head = page_buffers(page);
938 	struct buffer_head *bh = head;
939 	int uptodate = PageUptodate(page);
940 	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
941 
942 	do {
943 		if (!buffer_mapped(bh)) {
944 			init_buffer(bh, NULL, NULL);
945 			bh->b_bdev = bdev;
946 			bh->b_blocknr = block;
947 			if (uptodate)
948 				set_buffer_uptodate(bh);
949 			if (block < end_block)
950 				set_buffer_mapped(bh);
951 		}
952 		block++;
953 		bh = bh->b_this_page;
954 	} while (bh != head);
955 
956 	/*
957 	 * Caller needs to validate requested block against end of device.
958 	 */
959 	return end_block;
960 }
961 
962 /*
963  * Create the page-cache page that contains the requested block.
964  *
965  * This is used purely for blockdev mappings.
966  */
967 static int
968 grow_dev_page(struct block_device *bdev, sector_t block,
969 		pgoff_t index, int size, int sizebits)
970 {
971 	struct inode *inode = bdev->bd_inode;
972 	struct page *page;
973 	struct buffer_head *bh;
974 	sector_t end_block;
975 	int ret = 0;		/* Will call free_more_memory() */
976 
977 	page = find_or_create_page(inode->i_mapping, index,
978 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
979 	if (!page)
980 		return ret;
981 
982 	BUG_ON(!PageLocked(page));
983 
984 	if (page_has_buffers(page)) {
985 		bh = page_buffers(page);
986 		if (bh->b_size == size) {
987 			end_block = init_page_buffers(page, bdev,
988 						index << sizebits, size);
989 			goto done;
990 		}
991 		if (!try_to_free_buffers(page))
992 			goto failed;
993 	}
994 
995 	/*
996 	 * Allocate some buffers for this page
997 	 */
998 	bh = alloc_page_buffers(page, size, 0);
999 	if (!bh)
1000 		goto failed;
1001 
1002 	/*
1003 	 * Link the page to the buffers and initialise them.  Take the
1004 	 * lock to be atomic wrt __find_get_block(), which does not
1005 	 * run under the page lock.
1006 	 */
1007 	spin_lock(&inode->i_mapping->private_lock);
1008 	link_dev_buffers(page, bh);
1009 	end_block = init_page_buffers(page, bdev, index << sizebits, size);
1010 	spin_unlock(&inode->i_mapping->private_lock);
1011 done:
1012 	ret = (block < end_block) ? 1 : -ENXIO;
1013 failed:
1014 	unlock_page(page);
1015 	page_cache_release(page);
1016 	return ret;
1017 }
1018 
1019 /*
1020  * Create buffers for the specified block device block's page.  If
1021  * that page was dirty, the buffers are set dirty also.
1022  */
1023 static int
1024 grow_buffers(struct block_device *bdev, sector_t block, int size)
1025 {
1026 	pgoff_t index;
1027 	int sizebits;
1028 
1029 	sizebits = -1;
1030 	do {
1031 		sizebits++;
1032 	} while ((size << sizebits) < PAGE_SIZE);
1033 
1034 	index = block >> sizebits;
1035 
1036 	/*
1037 	 * Check for a block which wants to lie outside our maximum possible
1038 	 * pagecache index.  (this comparison is done using sector_t types).
1039 	 */
1040 	if (unlikely(index != block >> sizebits)) {
1041 		char b[BDEVNAME_SIZE];
1042 
1043 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1044 			"device %s\n",
1045 			__func__, (unsigned long long)block,
1046 			bdevname(bdev, b));
1047 		return -EIO;
1048 	}
1049 
1050 	/* Create a page with the proper size buffers.. */
1051 	return grow_dev_page(bdev, block, index, size, sizebits);
1052 }
1053 
1054 static struct buffer_head *
1055 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1056 {
1057 	/* Size must be multiple of hard sectorsize */
1058 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1059 			(size < 512 || size > PAGE_SIZE))) {
1060 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1061 					size);
1062 		printk(KERN_ERR "logical block size: %d\n",
1063 					bdev_logical_block_size(bdev));
1064 
1065 		dump_stack();
1066 		return NULL;
1067 	}
1068 
1069 	for (;;) {
1070 		struct buffer_head *bh;
1071 		int ret;
1072 
1073 		bh = __find_get_block(bdev, block, size);
1074 		if (bh)
1075 			return bh;
1076 
1077 		ret = grow_buffers(bdev, block, size);
1078 		if (ret < 0)
1079 			return NULL;
1080 		if (ret == 0)
1081 			free_more_memory();
1082 	}
1083 }
1084 
1085 /*
1086  * The relationship between dirty buffers and dirty pages:
1087  *
1088  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1089  * the page is tagged dirty in its radix tree.
1090  *
1091  * At all times, the dirtiness of the buffers represents the dirtiness of
1092  * subsections of the page.  If the page has buffers, the page dirty bit is
1093  * merely a hint about the true dirty state.
1094  *
1095  * When a page is set dirty in its entirety, all its buffers are marked dirty
1096  * (if the page has buffers).
1097  *
1098  * When a buffer is marked dirty, its page is dirtied, but the page's other
1099  * buffers are not.
1100  *
1101  * Also.  When blockdev buffers are explicitly read with bread(), they
1102  * individually become uptodate.  But their backing page remains not
1103  * uptodate - even if all of its buffers are uptodate.  A subsequent
1104  * block_read_full_page() against that page will discover all the uptodate
1105  * buffers, will set the page uptodate and will perform no I/O.
1106  */
1107 
1108 /**
1109  * mark_buffer_dirty - mark a buffer_head as needing writeout
1110  * @bh: the buffer_head to mark dirty
1111  *
1112  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1113  * backing page dirty, then tag the page as dirty in its address_space's radix
1114  * tree and then attach the address_space's inode to its superblock's dirty
1115  * inode list.
1116  *
1117  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1118  * mapping->tree_lock and mapping->host->i_lock.
1119  */
1120 void mark_buffer_dirty(struct buffer_head *bh)
1121 {
1122 	WARN_ON_ONCE(!buffer_uptodate(bh));
1123 
1124 	trace_block_dirty_buffer(bh);
1125 
1126 	/*
1127 	 * Very *carefully* optimize the it-is-already-dirty case.
1128 	 *
1129 	 * Don't let the final "is it dirty" escape to before we
1130 	 * perhaps modified the buffer.
1131 	 */
1132 	if (buffer_dirty(bh)) {
1133 		smp_mb();
1134 		if (buffer_dirty(bh))
1135 			return;
1136 	}
1137 
1138 	if (!test_set_buffer_dirty(bh)) {
1139 		struct page *page = bh->b_page;
1140 		if (!TestSetPageDirty(page)) {
1141 			struct address_space *mapping = page_mapping(page);
1142 			if (mapping)
1143 				__set_page_dirty(page, mapping, 0);
1144 		}
1145 	}
1146 }
1147 EXPORT_SYMBOL(mark_buffer_dirty);
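
/*
 * Typical pattern for updating a metadata block in place (a sketch, not
 * taken from any particular filesystem):
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	lock_buffer(bh);
 *	... update bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 *	brelse(bh);
 *
 * The dirty buffer is then written back by the flusher threads, or earlier
 * via sync_dirty_buffer() or fsync() if the caller needs it on disk now.
 */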
1148 
1149 /*
1150  * Decrement a buffer_head's reference count.  If all buffers against a page
1151  * have zero reference count, are clean and unlocked, and if the page is clean
1152  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1153  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1154  * a page but it ends up not being freed, and buffers may later be reattached).
1155  */
1156 void __brelse(struct buffer_head * buf)
1157 {
1158 	if (atomic_read(&buf->b_count)) {
1159 		put_bh(buf);
1160 		return;
1161 	}
1162 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1163 }
1164 EXPORT_SYMBOL(__brelse);
1165 
1166 /*
1167  * bforget() is like brelse(), except it discards any
1168  * potentially dirty data.
1169  */
1170 void __bforget(struct buffer_head *bh)
1171 {
1172 	clear_buffer_dirty(bh);
1173 	if (bh->b_assoc_map) {
1174 		struct address_space *buffer_mapping = bh->b_page->mapping;
1175 
1176 		spin_lock(&buffer_mapping->private_lock);
1177 		list_del_init(&bh->b_assoc_buffers);
1178 		bh->b_assoc_map = NULL;
1179 		spin_unlock(&buffer_mapping->private_lock);
1180 	}
1181 	__brelse(bh);
1182 }
1183 EXPORT_SYMBOL(__bforget);
1184 
1185 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1186 {
1187 	lock_buffer(bh);
1188 	if (buffer_uptodate(bh)) {
1189 		unlock_buffer(bh);
1190 		return bh;
1191 	} else {
1192 		get_bh(bh);
1193 		bh->b_end_io = end_buffer_read_sync;
1194 		submit_bh(READ, bh);
1195 		wait_on_buffer(bh);
1196 		if (buffer_uptodate(bh))
1197 			return bh;
1198 	}
1199 	brelse(bh);
1200 	return NULL;
1201 }
1202 
1203 /*
1204  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1205  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1206  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1207  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1208  * CPU's LRUs at the same time.
1209  *
1210  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1211  * sb_find_get_block().
1212  *
1213  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1214  * a local interrupt disable for that.
1215  */
1216 
1217 #define BH_LRU_SIZE	8
1218 
1219 struct bh_lru {
1220 	struct buffer_head *bhs[BH_LRU_SIZE];
1221 };
1222 
1223 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1224 
1225 #ifdef CONFIG_SMP
1226 #define bh_lru_lock()	local_irq_disable()
1227 #define bh_lru_unlock()	local_irq_enable()
1228 #else
1229 #define bh_lru_lock()	preempt_disable()
1230 #define bh_lru_unlock()	preempt_enable()
1231 #endif
1232 
1233 static inline void check_irqs_on(void)
1234 {
1235 #ifdef irqs_disabled
1236 	BUG_ON(irqs_disabled());
1237 #endif
1238 }
1239 
1240 /*
1241  * The LRU management algorithm is dopey-but-simple.  Sorry.
1242  */
1243 static void bh_lru_install(struct buffer_head *bh)
1244 {
1245 	struct buffer_head *evictee = NULL;
1246 
1247 	check_irqs_on();
1248 	bh_lru_lock();
1249 	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1250 		struct buffer_head *bhs[BH_LRU_SIZE];
1251 		int in;
1252 		int out = 0;
1253 
1254 		get_bh(bh);
1255 		bhs[out++] = bh;
1256 		for (in = 0; in < BH_LRU_SIZE; in++) {
1257 			struct buffer_head *bh2 =
1258 				__this_cpu_read(bh_lrus.bhs[in]);
1259 
1260 			if (bh2 == bh) {
1261 				__brelse(bh2);
1262 			} else {
1263 				if (out >= BH_LRU_SIZE) {
1264 					BUG_ON(evictee != NULL);
1265 					evictee = bh2;
1266 				} else {
1267 					bhs[out++] = bh2;
1268 				}
1269 			}
1270 		}
1271 		while (out < BH_LRU_SIZE)
1272 			bhs[out++] = NULL;
1273 		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1274 	}
1275 	bh_lru_unlock();
1276 
1277 	if (evictee)
1278 		__brelse(evictee);
1279 }
1280 
1281 /*
1282  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1283  */
1284 static struct buffer_head *
1285 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1286 {
1287 	struct buffer_head *ret = NULL;
1288 	unsigned int i;
1289 
1290 	check_irqs_on();
1291 	bh_lru_lock();
1292 	for (i = 0; i < BH_LRU_SIZE; i++) {
1293 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1294 
1295 		if (bh && bh->b_bdev == bdev &&
1296 				bh->b_blocknr == block && bh->b_size == size) {
1297 			if (i) {
1298 				while (i) {
1299 					__this_cpu_write(bh_lrus.bhs[i],
1300 						__this_cpu_read(bh_lrus.bhs[i - 1]));
1301 					i--;
1302 				}
1303 				__this_cpu_write(bh_lrus.bhs[0], bh);
1304 			}
1305 			get_bh(bh);
1306 			ret = bh;
1307 			break;
1308 		}
1309 	}
1310 	bh_lru_unlock();
1311 	return ret;
1312 }
1313 
1314 /*
1315  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1316  * it in the LRU and mark it as accessed.  If it is not present then return
1317  * NULL
1318  */
1319 struct buffer_head *
1320 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1321 {
1322 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1323 
1324 	if (bh == NULL) {
1325 		bh = __find_get_block_slow(bdev, block);
1326 		if (bh)
1327 			bh_lru_install(bh);
1328 	}
1329 	if (bh)
1330 		touch_buffer(bh);
1331 	return bh;
1332 }
1333 EXPORT_SYMBOL(__find_get_block);
1334 
1335 /*
1336  * __getblk will locate (and, if necessary, create) the buffer_head
1337  * which corresponds to the passed block_device, block and size. The
1338  * returned buffer has its reference count incremented.
1339  *
1340  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1341  * attempt is failing.  FIXME, perhaps?
1342  */
1343 struct buffer_head *
1344 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1345 {
1346 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1347 
1348 	might_sleep();
1349 	if (bh == NULL)
1350 		bh = __getblk_slow(bdev, block, size);
1351 	return bh;
1352 }
1353 EXPORT_SYMBOL(__getblk);
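
/*
 * Because __getblk() does not read the block from disk, it suits callers
 * which will overwrite the whole buffer anyway.  A sketch (not from this
 * file):
 *
 *	bh = sb_getblk(sb, blocknr);
 *	if (!bh)
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 *	brelse(bh);
 */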
1354 
1355 /*
1356  * Do async read-ahead on a buffer..
1357  */
1358 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1359 {
1360 	struct buffer_head *bh = __getblk(bdev, block, size);
1361 	if (likely(bh)) {
1362 		ll_rw_block(READA, 1, &bh);
1363 		brelse(bh);
1364 	}
1365 }
1366 EXPORT_SYMBOL(__breadahead);
1367 
1368 /**
1369  *  __bread() - reads a specified block and returns the bh
1370  *  @bdev: the block_device to read from
1371  *  @block: number of block
1372  *  @size: size (in bytes) to read
1373  *
1374  *  Reads a specified block, and returns buffer head that contains it.
1375  *  It returns NULL if the block was unreadable.
1376  */
1377 struct buffer_head *
1378 __bread(struct block_device *bdev, sector_t block, unsigned size)
1379 {
1380 	struct buffer_head *bh = __getblk(bdev, block, size);
1381 
1382 	if (likely(bh) && !buffer_uptodate(bh))
1383 		bh = __bread_slow(bh);
1384 	return bh;
1385 }
1386 EXPORT_SYMBOL(__bread);
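
/*
 * Typical read-side usage (a sketch): sb_bread() is the usual wrapper
 * around __bread() for a mounted filesystem:
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;	(the block could not be read)
 *	... consume bh->b_data ...
 *	brelse(bh);
 */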
1387 
1388 /*
1389  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1390  * This doesn't race because it runs in each cpu either in irq
1391  * or with preempt disabled.
1392  */
1393 static void invalidate_bh_lru(void *arg)
1394 {
1395 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1396 	int i;
1397 
1398 	for (i = 0; i < BH_LRU_SIZE; i++) {
1399 		brelse(b->bhs[i]);
1400 		b->bhs[i] = NULL;
1401 	}
1402 	put_cpu_var(bh_lrus);
1403 }
1404 
1405 static bool has_bh_in_lru(int cpu, void *dummy)
1406 {
1407 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1408 	int i;
1409 
1410 	for (i = 0; i < BH_LRU_SIZE; i++) {
1411 		if (b->bhs[i])
1412 			return 1;
1413 	}
1414 
1415 	return 0;
1416 }
1417 
1418 void invalidate_bh_lrus(void)
1419 {
1420 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
1421 }
1422 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1423 
1424 void set_bh_page(struct buffer_head *bh,
1425 		struct page *page, unsigned long offset)
1426 {
1427 	bh->b_page = page;
1428 	BUG_ON(offset >= PAGE_SIZE);
1429 	if (PageHighMem(page))
1430 		/*
1431 		 * This catches illegal uses and preserves the offset:
1432 		 */
1433 		bh->b_data = (char *)(0 + offset);
1434 	else
1435 		bh->b_data = page_address(page) + offset;
1436 }
1437 EXPORT_SYMBOL(set_bh_page);
1438 
1439 /*
1440  * Called when truncating a buffer on a page completely.
1441  */
1442 static void discard_buffer(struct buffer_head * bh)
1443 {
1444 	lock_buffer(bh);
1445 	clear_buffer_dirty(bh);
1446 	bh->b_bdev = NULL;
1447 	clear_buffer_mapped(bh);
1448 	clear_buffer_req(bh);
1449 	clear_buffer_new(bh);
1450 	clear_buffer_delay(bh);
1451 	clear_buffer_unwritten(bh);
1452 	unlock_buffer(bh);
1453 }
1454 
1455 /**
1456  * block_invalidatepage - invalidate part or all of a buffer-backed page
1457  *
1458  * @page: the page which is affected
1459  * @offset: the index of the truncation point
1460  *
1461  * block_invalidatepage() is called when all or part of the page has become
1462  * invalidated by a truncate operation.
1463  *
1464  * block_invalidatepage() does not have to release all buffers, but it must
1465  * ensure that no dirty buffer is left outside @offset and that no I/O
1466  * is underway against any of the blocks which are outside the truncation
1467  * point.  Because the caller is about to free (and possibly reuse) those
1468  * blocks on-disk.
1469  */
1470 void block_invalidatepage(struct page *page, unsigned long offset)
1471 {
1472 	struct buffer_head *head, *bh, *next;
1473 	unsigned int curr_off = 0;
1474 
1475 	BUG_ON(!PageLocked(page));
1476 	if (!page_has_buffers(page))
1477 		goto out;
1478 
1479 	head = page_buffers(page);
1480 	bh = head;
1481 	do {
1482 		unsigned int next_off = curr_off + bh->b_size;
1483 		next = bh->b_this_page;
1484 
1485 		/*
1486 		 * is this block fully invalidated?
1487 		 */
1488 		if (offset <= curr_off)
1489 			discard_buffer(bh);
1490 		curr_off = next_off;
1491 		bh = next;
1492 	} while (bh != head);
1493 
1494 	/*
1495 	 * We release buffers only if the entire page is being invalidated.
1496 	 * The get_block cached value has been unconditionally invalidated,
1497 	 * so real IO is not possible anymore.
1498 	 */
1499 	if (offset == 0)
1500 		try_to_release_page(page, 0);
1501 out:
1502 	return;
1503 }
1504 EXPORT_SYMBOL(block_invalidatepage);
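
/*
 * These helpers are normally wired into a buffer-backed filesystem's
 * address_space_operations.  A rough sketch (the myfs_* names are made up;
 * a real filesystem supplies its own get_block via small wrappers such as
 * myfs_readpage/myfs_writepage around block_read_full_page() and
 * block_write_full_page()):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.write_begin	= myfs_write_begin,
 *		.write_end	= generic_write_end,
 *		.set_page_dirty	= __set_page_dirty_buffers,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */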
1505 
1506 /*
1507  * We attach and possibly dirty the buffers atomically wrt
1508  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1509  * is already excluded via the page lock.
1510  */
1511 void create_empty_buffers(struct page *page,
1512 			unsigned long blocksize, unsigned long b_state)
1513 {
1514 	struct buffer_head *bh, *head, *tail;
1515 
1516 	head = alloc_page_buffers(page, blocksize, 1);
1517 	bh = head;
1518 	do {
1519 		bh->b_state |= b_state;
1520 		tail = bh;
1521 		bh = bh->b_this_page;
1522 	} while (bh);
1523 	tail->b_this_page = head;
1524 
1525 	spin_lock(&page->mapping->private_lock);
1526 	if (PageUptodate(page) || PageDirty(page)) {
1527 		bh = head;
1528 		do {
1529 			if (PageDirty(page))
1530 				set_buffer_dirty(bh);
1531 			if (PageUptodate(page))
1532 				set_buffer_uptodate(bh);
1533 			bh = bh->b_this_page;
1534 		} while (bh != head);
1535 	}
1536 	attach_page_buffers(page, head);
1537 	spin_unlock(&page->mapping->private_lock);
1538 }
1539 EXPORT_SYMBOL(create_empty_buffers);
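
/*
 * Typical caller pattern (a sketch): attach buffers before walking them,
 * for example in a ->readpage or ->page_mkwrite path:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 *	head = page_buffers(page);
 */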
1540 
1541 /*
1542  * We are taking a block for data and we don't want any output from any
1543  * buffer-cache aliases starting from return from that function and
1544  * until the moment when something will explicitly mark the buffer
1545  * dirty (hopefully that will not happen until we will free that block ;-)
1546  * We don't even need to mark it not-uptodate - nobody can expect
1547  * anything from a newly allocated buffer anyway. We used to use
1548  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1549  * don't want to mark the alias unmapped, for example - it would confuse
1550  * anyone who might pick it with bread() afterwards...
1551  *
1552  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1553  * be writeout I/O going on against recently-freed buffers.  We don't
1554  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1555  * only if we really need to.  That happens here.
1556  */
1557 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1558 {
1559 	struct buffer_head *old_bh;
1560 
1561 	might_sleep();
1562 
1563 	old_bh = __find_get_block_slow(bdev, block);
1564 	if (old_bh) {
1565 		clear_buffer_dirty(old_bh);
1566 		wait_on_buffer(old_bh);
1567 		clear_buffer_req(old_bh);
1568 		__brelse(old_bh);
1569 	}
1570 }
1571 EXPORT_SYMBOL(unmap_underlying_metadata);
1572 
1573 /*
1574  * Size is a power-of-two in the range 512..PAGE_SIZE,
1575  * and the case we care about most is PAGE_SIZE.
1576  *
1577  * So this *could* possibly be written with those
1578  * constraints in mind (relevant mostly if some
1579  * architecture has a slow bit-scan instruction)
1580  */
1581 static inline int block_size_bits(unsigned int blocksize)
1582 {
1583 	return ilog2(blocksize);
1584 }
1585 
1586 static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
1587 {
1588 	BUG_ON(!PageLocked(page));
1589 
1590 	if (!page_has_buffers(page))
1591 		create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
1592 	return page_buffers(page);
1593 }
1594 
1595 /*
1596  * NOTE! All mapped/uptodate combinations are valid:
1597  *
1598  *	Mapped	Uptodate	Meaning
1599  *
1600  *	No	No		"unknown" - must do get_block()
1601  *	No	Yes		"hole" - zero-filled
1602  *	Yes	No		"allocated" - allocated on disk, not read in
1603  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1604  *
1605  * "Dirty" is valid only with the last case (mapped+uptodate).
1606  */
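
/*
 * A caller resolving a buffer's state can act on that table roughly as
 * follows (sketch only; block_read_full_page() does the same thing in a
 * batched, asynchronous way):
 *
 *	if (!buffer_uptodate(bh)) {
 *		if (!buffer_mapped(bh))
 *			get_block(inode, iblock, bh, 0);
 *		if (buffer_mapped(bh)) {
 *			ll_rw_block(READ, 1, &bh);
 *			wait_on_buffer(bh);
 *		} else {
 *			zero_user(page, bh_offset(bh), bh->b_size);
 *			set_buffer_uptodate(bh);
 *		}
 *	}
 */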
1607 
1608 /*
1609  * While block_write_full_page is writing back the dirty buffers under
1610  * the page lock, whoever dirtied the buffers may decide to clean them
1611  * again at any time.  We handle that by only looking at the buffer
1612  * state inside lock_buffer().
1613  *
1614  * If block_write_full_page() is called for regular writeback
1615  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1616  * locked buffer.  This can only happen if someone has written the buffer
1617  * directly, with submit_bh().  At the address_space level PageWriteback
1618  * prevents this contention from occurring.
1619  *
1620  * If block_write_full_page() is called with wbc->sync_mode ==
1621  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1622  * causes the writes to be flagged as synchronous writes.
1623  */
1624 static int __block_write_full_page(struct inode *inode, struct page *page,
1625 			get_block_t *get_block, struct writeback_control *wbc,
1626 			bh_end_io_t *handler)
1627 {
1628 	int err;
1629 	sector_t block;
1630 	sector_t last_block;
1631 	struct buffer_head *bh, *head;
1632 	unsigned int blocksize, bbits;
1633 	int nr_underway = 0;
1634 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1635 			WRITE_SYNC : WRITE);
1636 
1637 	head = create_page_buffers(page, inode,
1638 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1639 
1640 	/*
1641 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1642 	 * here, and the (potentially unmapped) buffers may become dirty at
1643 	 * any time.  If a buffer becomes dirty here after we've inspected it
1644 	 * then we just miss that fact, and the page stays dirty.
1645 	 *
1646 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1647 	 * handle that here by just cleaning them.
1648 	 */
1649 
1650 	bh = head;
1651 	blocksize = bh->b_size;
1652 	bbits = block_size_bits(blocksize);
1653 
1654 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1655 	last_block = (i_size_read(inode) - 1) >> bbits;
1656 
1657 	/*
1658 	 * Get all the dirty buffers mapped to disk addresses and
1659 	 * handle any aliases from the underlying blockdev's mapping.
1660 	 */
1661 	do {
1662 		if (block > last_block) {
1663 			/*
1664 			 * mapped buffers outside i_size will occur, because
1665 			 * this page can be outside i_size when there is a
1666 			 * truncate in progress.
1667 			 */
1668 			/*
1669 			 * The buffer was zeroed by block_write_full_page()
1670 			 */
1671 			clear_buffer_dirty(bh);
1672 			set_buffer_uptodate(bh);
1673 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1674 			   buffer_dirty(bh)) {
1675 			WARN_ON(bh->b_size != blocksize);
1676 			err = get_block(inode, block, bh, 1);
1677 			if (err)
1678 				goto recover;
1679 			clear_buffer_delay(bh);
1680 			if (buffer_new(bh)) {
1681 				/* blockdev mappings never come here */
1682 				clear_buffer_new(bh);
1683 				unmap_underlying_metadata(bh->b_bdev,
1684 							bh->b_blocknr);
1685 			}
1686 		}
1687 		bh = bh->b_this_page;
1688 		block++;
1689 	} while (bh != head);
1690 
1691 	do {
1692 		if (!buffer_mapped(bh))
1693 			continue;
1694 		/*
1695 		 * If it's a fully non-blocking write attempt and we cannot
1696 		 * lock the buffer then redirty the page.  Note that this can
1697 		 * potentially cause a busy-wait loop from writeback threads
1698 		 * and kswapd activity, but those code paths have their own
1699 		 * higher-level throttling.
1700 		 */
1701 		if (wbc->sync_mode != WB_SYNC_NONE) {
1702 			lock_buffer(bh);
1703 		} else if (!trylock_buffer(bh)) {
1704 			redirty_page_for_writepage(wbc, page);
1705 			continue;
1706 		}
1707 		if (test_clear_buffer_dirty(bh)) {
1708 			mark_buffer_async_write_endio(bh, handler);
1709 		} else {
1710 			unlock_buffer(bh);
1711 		}
1712 	} while ((bh = bh->b_this_page) != head);
1713 
1714 	/*
1715 	 * The page and its buffers are protected by PageWriteback(), so we can
1716 	 * drop the bh refcounts early.
1717 	 */
1718 	BUG_ON(PageWriteback(page));
1719 	set_page_writeback(page);
1720 
1721 	do {
1722 		struct buffer_head *next = bh->b_this_page;
1723 		if (buffer_async_write(bh)) {
1724 			submit_bh(write_op, bh);
1725 			nr_underway++;
1726 		}
1727 		bh = next;
1728 	} while (bh != head);
1729 	unlock_page(page);
1730 
1731 	err = 0;
1732 done:
1733 	if (nr_underway == 0) {
1734 		/*
1735 		 * The page was marked dirty, but the buffers were
1736 		 * clean.  Someone wrote them back by hand with
1737 		 * ll_rw_block/submit_bh.  A rare case.
1738 		 */
1739 		end_page_writeback(page);
1740 
1741 		/*
1742 		 * The page and buffer_heads can be released at any time from
1743 		 * here on.
1744 		 */
1745 	}
1746 	return err;
1747 
1748 recover:
1749 	/*
1750 	 * ENOSPC, or some other error.  We may already have added some
1751 	 * blocks to the file, so we need to write these out to avoid
1752 	 * exposing stale data.
1753 	 * The page is currently locked and not marked for writeback
1754 	 */
1755 	bh = head;
1756 	/* Recovery: lock and submit the mapped buffers */
1757 	do {
1758 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1759 		    !buffer_delay(bh)) {
1760 			lock_buffer(bh);
1761 			mark_buffer_async_write_endio(bh, handler);
1762 		} else {
1763 			/*
1764 			 * The buffer may have been set dirty during
1765 			 * attachment to a dirty page.
1766 			 */
1767 			clear_buffer_dirty(bh);
1768 		}
1769 	} while ((bh = bh->b_this_page) != head);
1770 	SetPageError(page);
1771 	BUG_ON(PageWriteback(page));
1772 	mapping_set_error(page->mapping, err);
1773 	set_page_writeback(page);
1774 	do {
1775 		struct buffer_head *next = bh->b_this_page;
1776 		if (buffer_async_write(bh)) {
1777 			clear_buffer_dirty(bh);
1778 			submit_bh(write_op, bh);
1779 			nr_underway++;
1780 		}
1781 		bh = next;
1782 	} while (bh != head);
1783 	unlock_page(page);
1784 	goto done;
1785 }
1786 
1787 /*
1788  * If a page has any new buffers, zero them out here, and mark them uptodate
1789  * and dirty so they'll be written out (in order to prevent uninitialised
1790  * block data from leaking). And clear the new bit.
1791  */
1792 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1793 {
1794 	unsigned int block_start, block_end;
1795 	struct buffer_head *head, *bh;
1796 
1797 	BUG_ON(!PageLocked(page));
1798 	if (!page_has_buffers(page))
1799 		return;
1800 
1801 	bh = head = page_buffers(page);
1802 	block_start = 0;
1803 	do {
1804 		block_end = block_start + bh->b_size;
1805 
1806 		if (buffer_new(bh)) {
1807 			if (block_end > from && block_start < to) {
1808 				if (!PageUptodate(page)) {
1809 					unsigned start, size;
1810 
1811 					start = max(from, block_start);
1812 					size = min(to, block_end) - start;
1813 
1814 					zero_user(page, start, size);
1815 					set_buffer_uptodate(bh);
1816 				}
1817 
1818 				clear_buffer_new(bh);
1819 				mark_buffer_dirty(bh);
1820 			}
1821 		}
1822 
1823 		block_start = block_end;
1824 		bh = bh->b_this_page;
1825 	} while (bh != head);
1826 }
1827 EXPORT_SYMBOL(page_zero_new_buffers);
1828 
1829 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1830 		get_block_t *get_block)
1831 {
1832 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1833 	unsigned to = from + len;
1834 	struct inode *inode = page->mapping->host;
1835 	unsigned block_start, block_end;
1836 	sector_t block;
1837 	int err = 0;
1838 	unsigned blocksize, bbits;
1839 	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
1840 
1841 	BUG_ON(!PageLocked(page));
1842 	BUG_ON(from > PAGE_CACHE_SIZE);
1843 	BUG_ON(to > PAGE_CACHE_SIZE);
1844 	BUG_ON(from > to);
1845 
1846 	head = create_page_buffers(page, inode, 0);
1847 	blocksize = head->b_size;
1848 	bbits = block_size_bits(blocksize);
1849 
1850 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1851 
1852 	for (bh = head, block_start = 0; bh != head || !block_start;
1853 	    block++, block_start=block_end, bh = bh->b_this_page) {
1854 		block_end = block_start + blocksize;
1855 		if (block_end <= from || block_start >= to) {
1856 			if (PageUptodate(page)) {
1857 				if (!buffer_uptodate(bh))
1858 					set_buffer_uptodate(bh);
1859 			}
1860 			continue;
1861 		}
1862 		if (buffer_new(bh))
1863 			clear_buffer_new(bh);
1864 		if (!buffer_mapped(bh)) {
1865 			WARN_ON(bh->b_size != blocksize);
1866 			err = get_block(inode, block, bh, 1);
1867 			if (err)
1868 				break;
1869 			if (buffer_new(bh)) {
1870 				unmap_underlying_metadata(bh->b_bdev,
1871 							bh->b_blocknr);
1872 				if (PageUptodate(page)) {
1873 					clear_buffer_new(bh);
1874 					set_buffer_uptodate(bh);
1875 					mark_buffer_dirty(bh);
1876 					continue;
1877 				}
1878 				if (block_end > to || block_start < from)
1879 					zero_user_segments(page,
1880 						to, block_end,
1881 						block_start, from);
1882 				continue;
1883 			}
1884 		}
1885 		if (PageUptodate(page)) {
1886 			if (!buffer_uptodate(bh))
1887 				set_buffer_uptodate(bh);
1888 			continue;
1889 		}
1890 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1891 		    !buffer_unwritten(bh) &&
1892 		     (block_start < from || block_end > to)) {
1893 			ll_rw_block(READ, 1, &bh);
1894 			*wait_bh++ = bh;
1895 		}
1896 	}
1897 	/*
1898 	 * If we issued read requests - let them complete.
1899 	 */
1900 	while (wait_bh > wait) {
1901 		wait_on_buffer(*--wait_bh);
1902 		if (!buffer_uptodate(*wait_bh))
1903 			err = -EIO;
1904 	}
1905 	if (unlikely(err))
1906 		page_zero_new_buffers(page, from, to);
1907 	return err;
1908 }
1909 EXPORT_SYMBOL(__block_write_begin);
1910 
1911 static int __block_commit_write(struct inode *inode, struct page *page,
1912 		unsigned from, unsigned to)
1913 {
1914 	unsigned block_start, block_end;
1915 	int partial = 0;
1916 	unsigned blocksize;
1917 	struct buffer_head *bh, *head;
1918 
1919 	bh = head = page_buffers(page);
1920 	blocksize = bh->b_size;
1921 
1922 	block_start = 0;
1923 	do {
1924 		block_end = block_start + blocksize;
1925 		if (block_end <= from || block_start >= to) {
1926 			if (!buffer_uptodate(bh))
1927 				partial = 1;
1928 		} else {
1929 			set_buffer_uptodate(bh);
1930 			mark_buffer_dirty(bh);
1931 		}
1932 		clear_buffer_new(bh);
1933 
1934 		block_start = block_end;
1935 		bh = bh->b_this_page;
1936 	} while (bh != head);
1937 
1938 	/*
1939 	 * If this is a partial write which happened to make all buffers
1940 	 * uptodate then we can optimize away a bogus readpage() for
1941 	 * the next read(). Here we 'discover' whether the page went
1942 	 * uptodate as a result of this (potentially partial) write.
1943 	 */
1944 	if (!partial)
1945 		SetPageUptodate(page);
1946 	return 0;
1947 }
1948 
1949 /*
1950  * block_write_begin takes care of the basic task of block allocation and
1951  * bringing partial write blocks uptodate first.
1952  *
1953  * The filesystem needs to handle block truncation upon failure.
1954  */
1955 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1956 		unsigned flags, struct page **pagep, get_block_t *get_block)
1957 {
1958 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1959 	struct page *page;
1960 	int status;
1961 
1962 	page = grab_cache_page_write_begin(mapping, index, flags);
1963 	if (!page)
1964 		return -ENOMEM;
1965 
1966 	status = __block_write_begin(page, pos, len, get_block);
1967 	if (unlikely(status)) {
1968 		unlock_page(page);
1969 		page_cache_release(page);
1970 		page = NULL;
1971 	}
1972 
1973 	*pagep = page;
1974 	return status;
1975 }
1976 EXPORT_SYMBOL(block_write_begin);
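
/*
 * Illustrative sketch, not part of this file: a filesystem whose blocks are
 * described by a get_block_t can implement ->write_begin() as a thin wrapper
 * around block_write_begin().  The names foo_write_begin() and
 * foo_get_block() are hypothetical stand-ins for the filesystem's own
 * functions.
 *
 *	static int foo_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, flags, pagep,
 *					 foo_get_block);
 *	}
 */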
1977 
1978 int block_write_end(struct file *file, struct address_space *mapping,
1979 			loff_t pos, unsigned len, unsigned copied,
1980 			struct page *page, void *fsdata)
1981 {
1982 	struct inode *inode = mapping->host;
1983 	unsigned start;
1984 
1985 	start = pos & (PAGE_CACHE_SIZE - 1);
1986 
1987 	if (unlikely(copied < len)) {
1988 		/*
1989 		 * The buffers that were written will now be uptodate, so we
1990 		 * don't have to worry about a readpage reading them and
1991 		 * overwriting a partial write. However if we have encountered
1992 		 * a short write and only partially written into a buffer, it
1993 		 * will not be marked uptodate, so a readpage might come in and
1994 		 * destroy our partial write.
1995 		 *
1996 		 * Do the simplest thing, and just treat any short write to a
1997 		 * non uptodate page as a zero-length write, and force the
1998 		 * caller to redo the whole thing.
1999 		 */
2000 		if (!PageUptodate(page))
2001 			copied = 0;
2002 
2003 		page_zero_new_buffers(page, start+copied, start+len);
2004 	}
2005 	flush_dcache_page(page);
2006 
2007 	/* This could be a short (even 0-length) commit */
2008 	__block_commit_write(inode, page, start, start+copied);
2009 
2010 	return copied;
2011 }
2012 EXPORT_SYMBOL(block_write_end);
2013 
2014 int generic_write_end(struct file *file, struct address_space *mapping,
2015 			loff_t pos, unsigned len, unsigned copied,
2016 			struct page *page, void *fsdata)
2017 {
2018 	struct inode *inode = mapping->host;
2019 	int i_size_changed = 0;
2020 
2021 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2022 
2023 	/*
2024 	 * No need to use i_size_read() here, the i_size
2025 	 * cannot change under us because we hold i_mutex.
2026 	 *
2027 	 * But it's important to update i_size while still holding page lock:
2028 	 * page writeout could otherwise come in and zero beyond i_size.
2029 	 */
2030 	if (pos+copied > inode->i_size) {
2031 		i_size_write(inode, pos+copied);
2032 		i_size_changed = 1;
2033 	}
2034 
2035 	unlock_page(page);
2036 	page_cache_release(page);
2037 
2038 	/*
2039 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2040 	 * makes the holding time of page lock longer. Second, it forces lock
2041 	 * ordering of page lock and transaction start for journaling
2042 	 * filesystems.
2043 	 */
2044 	if (i_size_changed)
2045 		mark_inode_dirty(inode);
2046 
2047 	return copied;
2048 }
2049 EXPORT_SYMBOL(generic_write_end);
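
/*
 * Illustrative sketch, not part of this file: a filesystem built on the
 * helpers above can usually wire generic_write_end() in directly as its
 * ->write_end() method.  All foo_* names are hypothetical.
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_writepage,
 *		.write_begin	= foo_write_begin,
 *		.write_end	= generic_write_end,
 *		.bmap		= foo_bmap,
 *	};
 */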
2050 
2051 /*
2052  * block_is_partially_uptodate checks whether buffers within a page are
2053  * uptodate or not.
2054  *
2055  * Returns true if all buffers which correspond to a file portion
2056  * we want to read are uptodate.
2057  */
2058 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2059 					unsigned long from)
2060 {
2061 	unsigned block_start, block_end, blocksize;
2062 	unsigned to;
2063 	struct buffer_head *bh, *head;
2064 	int ret = 1;
2065 
2066 	if (!page_has_buffers(page))
2067 		return 0;
2068 
2069 	head = page_buffers(page);
2070 	blocksize = head->b_size;
2071 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2072 	to = from + to;
2073 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2074 		return 0;
2075 
2076 	bh = head;
2077 	block_start = 0;
2078 	do {
2079 		block_end = block_start + blocksize;
2080 		if (block_end > from && block_start < to) {
2081 			if (!buffer_uptodate(bh)) {
2082 				ret = 0;
2083 				break;
2084 			}
2085 			if (block_end >= to)
2086 				break;
2087 		}
2088 		block_start = block_end;
2089 		bh = bh->b_this_page;
2090 	} while (bh != head);
2091 
2092 	return ret;
2093 }
2094 EXPORT_SYMBOL(block_is_partially_uptodate);
2095 
2096 /*
2097  * Generic "read page" function for block devices that have the normal
2098  * get_block functionality. This covers most block device filesystems.
2099  * Reads the page asynchronously --- the unlock_buffer() and
2100  * set/clear_buffer_uptodate() functions propagate buffer state into the
2101  * page struct once IO has completed.
2102  */
2103 int block_read_full_page(struct page *page, get_block_t *get_block)
2104 {
2105 	struct inode *inode = page->mapping->host;
2106 	sector_t iblock, lblock;
2107 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2108 	unsigned int blocksize, bbits;
2109 	int nr, i;
2110 	int fully_mapped = 1;
2111 
2112 	head = create_page_buffers(page, inode, 0);
2113 	blocksize = head->b_size;
2114 	bbits = block_size_bits(blocksize);
2115 
2116 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
2117 	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
2118 	bh = head;
2119 	nr = 0;
2120 	i = 0;
2121 
2122 	do {
2123 		if (buffer_uptodate(bh))
2124 			continue;
2125 
2126 		if (!buffer_mapped(bh)) {
2127 			int err = 0;
2128 
2129 			fully_mapped = 0;
2130 			if (iblock < lblock) {
2131 				WARN_ON(bh->b_size != blocksize);
2132 				err = get_block(inode, iblock, bh, 0);
2133 				if (err)
2134 					SetPageError(page);
2135 			}
2136 			if (!buffer_mapped(bh)) {
2137 				zero_user(page, i * blocksize, blocksize);
2138 				if (!err)
2139 					set_buffer_uptodate(bh);
2140 				continue;
2141 			}
2142 			/*
2143 			 * get_block() might have updated the buffer
2144 			 * synchronously
2145 			 */
2146 			if (buffer_uptodate(bh))
2147 				continue;
2148 		}
2149 		arr[nr++] = bh;
2150 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2151 
2152 	if (fully_mapped)
2153 		SetPageMappedToDisk(page);
2154 
2155 	if (!nr) {
2156 		/*
2157 		 * All buffers are uptodate - we can set the page uptodate
2158 		 * as well. But not if get_block() returned an error.
2159 		 */
2160 		if (!PageError(page))
2161 			SetPageUptodate(page);
2162 		unlock_page(page);
2163 		return 0;
2164 	}
2165 
2166 	/* Stage two: lock the buffers */
2167 	for (i = 0; i < nr; i++) {
2168 		bh = arr[i];
2169 		lock_buffer(bh);
2170 		mark_buffer_async_read(bh);
2171 	}
2172 
2173 	/*
2174 	 * Stage 3: start the IO.  Check for uptodateness
2175 	 * inside the buffer lock in case another process reading
2176 	 * the underlying blockdev brought it uptodate (the sct fix).
2177 	 */
2178 	for (i = 0; i < nr; i++) {
2179 		bh = arr[i];
2180 		if (buffer_uptodate(bh))
2181 			end_buffer_async_read(bh, 1);
2182 		else
2183 			submit_bh(READ, bh);
2184 	}
2185 	return 0;
2186 }
2187 EXPORT_SYMBOL(block_read_full_page);
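
/*
 * Illustrative sketch, not part of this file: a typical ->readpage()
 * implementation is just a wrapper that passes the filesystem's get_block
 * routine.  foo_readpage() and foo_get_block() are hypothetical names.
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, foo_get_block);
 *	}
 */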
2188 
2189 /* utility function for filesystems that need to do work on expanding
2190  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2191  * deal with the hole.
2192  */
2193 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2194 {
2195 	struct address_space *mapping = inode->i_mapping;
2196 	struct page *page;
2197 	void *fsdata;
2198 	int err;
2199 
2200 	err = inode_newsize_ok(inode, size);
2201 	if (err)
2202 		goto out;
2203 
2204 	err = pagecache_write_begin(NULL, mapping, size, 0,
2205 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2206 				&page, &fsdata);
2207 	if (err)
2208 		goto out;
2209 
2210 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2211 	BUG_ON(err > 0);
2212 
2213 out:
2214 	return err;
2215 }
2216 EXPORT_SYMBOL(generic_cont_expand_simple);
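
/*
 * Illustrative sketch, not part of this file: a filesystem that cannot
 * represent holes would typically call this from its ->setattr() path when
 * the requested size grows.  foo_setattr() is hypothetical; attr is the
 * usual struct iattr argument and err a local int.
 *
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size > i_size_read(inode)) {
 *			err = generic_cont_expand_simple(inode, attr->ia_size);
 *			if (err)
 *				return err;
 *		}
 */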
2217 
2218 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2219 			    loff_t pos, loff_t *bytes)
2220 {
2221 	struct inode *inode = mapping->host;
2222 	unsigned blocksize = 1 << inode->i_blkbits;
2223 	struct page *page;
2224 	void *fsdata;
2225 	pgoff_t index, curidx;
2226 	loff_t curpos;
2227 	unsigned zerofrom, offset, len;
2228 	int err = 0;
2229 
2230 	index = pos >> PAGE_CACHE_SHIFT;
2231 	offset = pos & ~PAGE_CACHE_MASK;
2232 
2233 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2234 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2235 		if (zerofrom & (blocksize-1)) {
2236 			*bytes |= (blocksize-1);
2237 			(*bytes)++;
2238 		}
2239 		len = PAGE_CACHE_SIZE - zerofrom;
2240 
2241 		err = pagecache_write_begin(file, mapping, curpos, len,
2242 						AOP_FLAG_UNINTERRUPTIBLE,
2243 						&page, &fsdata);
2244 		if (err)
2245 			goto out;
2246 		zero_user(page, zerofrom, len);
2247 		err = pagecache_write_end(file, mapping, curpos, len, len,
2248 						page, fsdata);
2249 		if (err < 0)
2250 			goto out;
2251 		BUG_ON(err != len);
2252 		err = 0;
2253 
2254 		balance_dirty_pages_ratelimited(mapping);
2255 	}
2256 
2257 	/* page covers the boundary, find the boundary offset */
2258 	if (index == curidx) {
2259 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2260 		/* if we are expanding the file, the last block will be filled */
2261 		if (offset <= zerofrom) {
2262 			goto out;
2263 		}
2264 		if (zerofrom & (blocksize-1)) {
2265 			*bytes |= (blocksize-1);
2266 			(*bytes)++;
2267 		}
2268 		len = offset - zerofrom;
2269 
2270 		err = pagecache_write_begin(file, mapping, curpos, len,
2271 						AOP_FLAG_UNINTERRUPTIBLE,
2272 						&page, &fsdata);
2273 		if (err)
2274 			goto out;
2275 		zero_user(page, zerofrom, len);
2276 		err = pagecache_write_end(file, mapping, curpos, len, len,
2277 						page, fsdata);
2278 		if (err < 0)
2279 			goto out;
2280 		BUG_ON(err != len);
2281 		err = 0;
2282 	}
2283 out:
2284 	return err;
2285 }
2286 
2287 /*
2288  * For moronic filesystems that do not allow holes in files.
2289  * We may have to extend the file.
2290  */
2291 int cont_write_begin(struct file *file, struct address_space *mapping,
2292 			loff_t pos, unsigned len, unsigned flags,
2293 			struct page **pagep, void **fsdata,
2294 			get_block_t *get_block, loff_t *bytes)
2295 {
2296 	struct inode *inode = mapping->host;
2297 	unsigned blocksize = 1 << inode->i_blkbits;
2298 	unsigned zerofrom;
2299 	int err;
2300 
2301 	err = cont_expand_zero(file, mapping, pos, bytes);
2302 	if (err)
2303 		return err;
2304 
2305 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2306 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2307 		*bytes |= (blocksize-1);
2308 		(*bytes)++;
2309 	}
2310 
2311 	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2312 }
2313 EXPORT_SYMBOL(cont_write_begin);
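
/*
 * Illustrative sketch, not part of this file: the final "bytes" argument
 * points at a per-inode loff_t recording how far the file has been filled
 * with allocated blocks (fs/fat, for example, passes its mmu_private field
 * here).  FOO_I() and i_fill_bytes below are hypothetical.
 *
 *	static int foo_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, foo_get_block,
 *					&FOO_I(mapping->host)->i_fill_bytes);
 *	}
 */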
2314 
2315 int block_commit_write(struct page *page, unsigned from, unsigned to)
2316 {
2317 	struct inode *inode = page->mapping->host;
2318 	__block_commit_write(inode, page, from, to);
2319 	return 0;
2320 }
2321 EXPORT_SYMBOL(block_commit_write);
2322 
2323 /*
2324  * block_page_mkwrite() is not allowed to change the file size as it gets
2325  * called from a page fault handler when a page is first dirtied. Hence we must
2326  * be careful to check for EOF conditions here. We set the page up correctly
2327  * for a written page which means we get ENOSPC checking when writing into
2328  * holes and correct delalloc and unwritten extent mapping on filesystems that
2329  * support these features.
2330  *
2331  * We are not allowed to take the i_mutex here so we have to play games to
2332  * protect against truncate races as the page could now be beyond EOF.  Because
2333  * truncate writes the inode size before removing pages, once we have the
2334  * page lock we can determine safely if the page is beyond EOF. If it is not
2335  * beyond EOF, then the page is guaranteed safe against truncation until we
2336  * unlock the page.
2337  *
2338  * Direct callers of this function should protect against filesystem freezing
2339  * using sb_start_write() - sb_end_write() functions.
2340  */
2341 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2342 			 get_block_t get_block)
2343 {
2344 	struct page *page = vmf->page;
2345 	struct inode *inode = file_inode(vma->vm_file);
2346 	unsigned long end;
2347 	loff_t size;
2348 	int ret;
2349 
2350 	lock_page(page);
2351 	size = i_size_read(inode);
2352 	if ((page->mapping != inode->i_mapping) ||
2353 	    (page_offset(page) > size)) {
2354 		/* We overload EFAULT to mean page got truncated */
2355 		ret = -EFAULT;
2356 		goto out_unlock;
2357 	}
2358 
2359 	/* page is wholly or partially inside EOF */
2360 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2361 		end = size & ~PAGE_CACHE_MASK;
2362 	else
2363 		end = PAGE_CACHE_SIZE;
2364 
2365 	ret = __block_write_begin(page, 0, end, get_block);
2366 	if (!ret)
2367 		ret = block_commit_write(page, 0, end);
2368 
2369 	if (unlikely(ret < 0))
2370 		goto out_unlock;
2371 	set_page_dirty(page);
2372 	wait_for_stable_page(page);
2373 	return 0;
2374 out_unlock:
2375 	unlock_page(page);
2376 	return ret;
2377 }
2378 EXPORT_SYMBOL(__block_page_mkwrite);
2379 
2380 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2381 		   get_block_t get_block)
2382 {
2383 	int ret;
2384 	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
2385 
2386 	sb_start_pagefault(sb);
2387 
2388 	/*
2389 	 * Update file times before taking page lock. We may end up failing the
2390 	 * fault so this update may be superfluous but who really cares...
2391 	 */
2392 	file_update_time(vma->vm_file);
2393 
2394 	ret = __block_page_mkwrite(vma, vmf, get_block);
2395 	sb_end_pagefault(sb);
2396 	return block_page_mkwrite_return(ret);
2397 }
2398 EXPORT_SYMBOL(block_page_mkwrite);
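
/*
 * Illustrative sketch, not part of this file: a filesystem without special
 * ->page_mkwrite() needs can route write faults straight through these
 * helpers.  The foo_* names are hypothetical.
 *
 *	static int foo_page_mkwrite(struct vm_area_struct *vma,
 *				    struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, foo_get_block);
 *	}
 *
 *	static const struct vm_operations_struct foo_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= foo_page_mkwrite,
 *	};
 */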
2399 
2400 /*
2401  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2402  * immediately, while under the page lock.  So it needs a special end_io
2403  * handler which does not touch the bh after unlocking it.
2404  */
2405 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2406 {
2407 	__end_buffer_read_notouch(bh, uptodate);
2408 }
2409 
2410 /*
2411  * Attach the singly-linked list of buffers created by nobh_write_begin to
2412  * the page (converting it to a circular linked list and taking care of page
2413  * dirty races).
2414  */
2415 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2416 {
2417 	struct buffer_head *bh;
2418 
2419 	BUG_ON(!PageLocked(page));
2420 
2421 	spin_lock(&page->mapping->private_lock);
2422 	bh = head;
2423 	do {
2424 		if (PageDirty(page))
2425 			set_buffer_dirty(bh);
2426 		if (!bh->b_this_page)
2427 			bh->b_this_page = head;
2428 		bh = bh->b_this_page;
2429 	} while (bh != head);
2430 	attach_page_buffers(page, head);
2431 	spin_unlock(&page->mapping->private_lock);
2432 }
2433 
2434 /*
2435  * On entry, the page is not uptodate at all.
2436  * On exit the page is fully uptodate in the areas outside (from,to)
2437  * The filesystem needs to handle block truncation upon failure.
2438  */
2439 int nobh_write_begin(struct address_space *mapping,
2440 			loff_t pos, unsigned len, unsigned flags,
2441 			struct page **pagep, void **fsdata,
2442 			get_block_t *get_block)
2443 {
2444 	struct inode *inode = mapping->host;
2445 	const unsigned blkbits = inode->i_blkbits;
2446 	const unsigned blocksize = 1 << blkbits;
2447 	struct buffer_head *head, *bh;
2448 	struct page *page;
2449 	pgoff_t index;
2450 	unsigned from, to;
2451 	unsigned block_in_page;
2452 	unsigned block_start, block_end;
2453 	sector_t block_in_file;
2454 	int nr_reads = 0;
2455 	int ret = 0;
2456 	int is_mapped_to_disk = 1;
2457 
2458 	index = pos >> PAGE_CACHE_SHIFT;
2459 	from = pos & (PAGE_CACHE_SIZE - 1);
2460 	to = from + len;
2461 
2462 	page = grab_cache_page_write_begin(mapping, index, flags);
2463 	if (!page)
2464 		return -ENOMEM;
2465 	*pagep = page;
2466 	*fsdata = NULL;
2467 
2468 	if (page_has_buffers(page)) {
2469 		ret = __block_write_begin(page, pos, len, get_block);
2470 		if (unlikely(ret))
2471 			goto out_release;
2472 		return ret;
2473 	}
2474 
2475 	if (PageMappedToDisk(page))
2476 		return 0;
2477 
2478 	/*
2479 	 * Allocate buffers so that we can keep track of state, and potentially
2480 	 * attach them to the page if an error occurs. In the common case of
2481 	 * no error, they will just be freed again without ever being attached
2482 	 * to the page (which is all OK, because we're under the page lock).
2483 	 *
2484 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2485 	 * than the circular one we're used to.
2486 	 */
2487 	head = alloc_page_buffers(page, blocksize, 0);
2488 	if (!head) {
2489 		ret = -ENOMEM;
2490 		goto out_release;
2491 	}
2492 
2493 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2494 
2495 	/*
2496 	 * We loop across all blocks in the page, whether or not they are
2497 	 * part of the affected region.  This is so we can discover if the
2498 	 * page is fully mapped-to-disk.
2499 	 */
2500 	for (block_start = 0, block_in_page = 0, bh = head;
2501 		  block_start < PAGE_CACHE_SIZE;
2502 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2503 		int create;
2504 
2505 		block_end = block_start + blocksize;
2506 		bh->b_state = 0;
2507 		create = 1;
2508 		if (block_start >= to)
2509 			create = 0;
2510 		ret = get_block(inode, block_in_file + block_in_page,
2511 					bh, create);
2512 		if (ret)
2513 			goto failed;
2514 		if (!buffer_mapped(bh))
2515 			is_mapped_to_disk = 0;
2516 		if (buffer_new(bh))
2517 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2518 		if (PageUptodate(page)) {
2519 			set_buffer_uptodate(bh);
2520 			continue;
2521 		}
2522 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2523 			zero_user_segments(page, block_start, from,
2524 							to, block_end);
2525 			continue;
2526 		}
2527 		if (buffer_uptodate(bh))
2528 			continue;	/* reiserfs does this */
2529 		if (block_start < from || block_end > to) {
2530 			lock_buffer(bh);
2531 			bh->b_end_io = end_buffer_read_nobh;
2532 			submit_bh(READ, bh);
2533 			nr_reads++;
2534 		}
2535 	}
2536 
2537 	if (nr_reads) {
2538 		/*
2539 		 * The page is locked, so these buffers are protected from
2540 		 * any VM or truncate activity.  Hence we don't need to care
2541 		 * for the buffer_head refcounts.
2542 		 */
2543 		for (bh = head; bh; bh = bh->b_this_page) {
2544 			wait_on_buffer(bh);
2545 			if (!buffer_uptodate(bh))
2546 				ret = -EIO;
2547 		}
2548 		if (ret)
2549 			goto failed;
2550 	}
2551 
2552 	if (is_mapped_to_disk)
2553 		SetPageMappedToDisk(page);
2554 
2555 	*fsdata = head; /* to be released by nobh_write_end */
2556 
2557 	return 0;
2558 
2559 failed:
2560 	BUG_ON(!ret);
2561 	/*
2562 	 * Error recovery is a bit difficult. We need to zero out blocks that
2563 	 * were newly allocated, and dirty them to ensure they get written out.
2564 	 * Buffers need to be attached to the page at this point, otherwise
2565 	 * the handling of potential IO errors during writeout would be hard
2566 	 * (could try doing synchronous writeout, but what if that fails too?)
2567 	 */
2568 	attach_nobh_buffers(page, head);
2569 	page_zero_new_buffers(page, from, to);
2570 
2571 out_release:
2572 	unlock_page(page);
2573 	page_cache_release(page);
2574 	*pagep = NULL;
2575 
2576 	return ret;
2577 }
2578 EXPORT_SYMBOL(nobh_write_begin);
2579 
2580 int nobh_write_end(struct file *file, struct address_space *mapping,
2581 			loff_t pos, unsigned len, unsigned copied,
2582 			struct page *page, void *fsdata)
2583 {
2584 	struct inode *inode = page->mapping->host;
2585 	struct buffer_head *head = fsdata;
2586 	struct buffer_head *bh;
2587 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2588 
2589 	if (unlikely(copied < len) && head)
2590 		attach_nobh_buffers(page, head);
2591 	if (page_has_buffers(page))
2592 		return generic_write_end(file, mapping, pos, len,
2593 					copied, page, fsdata);
2594 
2595 	SetPageUptodate(page);
2596 	set_page_dirty(page);
2597 	if (pos+copied > inode->i_size) {
2598 		i_size_write(inode, pos+copied);
2599 		mark_inode_dirty(inode);
2600 	}
2601 
2602 	unlock_page(page);
2603 	page_cache_release(page);
2604 
2605 	while (head) {
2606 		bh = head;
2607 		head = head->b_this_page;
2608 		free_buffer_head(bh);
2609 	}
2610 
2611 	return copied;
2612 }
2613 EXPORT_SYMBOL(nobh_write_end);
2614 
2615 /*
2616  * nobh_writepage() - based on block_write_full_page() except
2617  * that it tries to operate without attaching bufferheads to
2618  * the page.
2619  */
2620 int nobh_writepage(struct page *page, get_block_t *get_block,
2621 			struct writeback_control *wbc)
2622 {
2623 	struct inode * const inode = page->mapping->host;
2624 	loff_t i_size = i_size_read(inode);
2625 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2626 	unsigned offset;
2627 	int ret;
2628 
2629 	/* Is the page fully inside i_size? */
2630 	if (page->index < end_index)
2631 		goto out;
2632 
2633 	/* Is the page fully outside i_size? (truncate in progress) */
2634 	offset = i_size & (PAGE_CACHE_SIZE-1);
2635 	if (page->index >= end_index+1 || !offset) {
2636 		/*
2637 		 * The page may have dirty, unmapped buffers.  For example,
2638 		 * they may have been added in ext3_writepage().  Make them
2639 		 * freeable here, so the page does not leak.
2640 		 */
2641 #if 0
2642 		/* Not really sure about this  - do we need this ? */
2643 		if (page->mapping->a_ops->invalidatepage)
2644 			page->mapping->a_ops->invalidatepage(page, offset);
2645 #endif
2646 		unlock_page(page);
2647 		return 0; /* don't care */
2648 	}
2649 
2650 	/*
2651 	 * The page straddles i_size.  It must be zeroed out on each and every
2652 	 * writepage invocation because it may be mmapped.  "A file is mapped
2653 	 * in multiples of the page size.  For a file that is not a multiple of
2654 	 * the  page size, the remaining memory is zeroed when mapped, and
2655 	 * writes to that region are not written out to the file."
2656 	 */
2657 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2658 out:
2659 	ret = mpage_writepage(page, get_block, wbc);
2660 	if (ret == -EAGAIN)
2661 		ret = __block_write_full_page(inode, page, get_block, wbc,
2662 					      end_buffer_async_write);
2663 	return ret;
2664 }
2665 EXPORT_SYMBOL(nobh_writepage);
2666 
2667 int nobh_truncate_page(struct address_space *mapping,
2668 			loff_t from, get_block_t *get_block)
2669 {
2670 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2671 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2672 	unsigned blocksize;
2673 	sector_t iblock;
2674 	unsigned length, pos;
2675 	struct inode *inode = mapping->host;
2676 	struct page *page;
2677 	struct buffer_head map_bh;
2678 	int err;
2679 
2680 	blocksize = 1 << inode->i_blkbits;
2681 	length = offset & (blocksize - 1);
2682 
2683 	/* Block boundary? Nothing to do */
2684 	if (!length)
2685 		return 0;
2686 
2687 	length = blocksize - length;
2688 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2689 
2690 	page = grab_cache_page(mapping, index);
2691 	err = -ENOMEM;
2692 	if (!page)
2693 		goto out;
2694 
2695 	if (page_has_buffers(page)) {
2696 has_buffers:
2697 		unlock_page(page);
2698 		page_cache_release(page);
2699 		return block_truncate_page(mapping, from, get_block);
2700 	}
2701 
2702 	/* Find the buffer that contains "offset" */
2703 	pos = blocksize;
2704 	while (offset >= pos) {
2705 		iblock++;
2706 		pos += blocksize;
2707 	}
2708 
2709 	map_bh.b_size = blocksize;
2710 	map_bh.b_state = 0;
2711 	err = get_block(inode, iblock, &map_bh, 0);
2712 	if (err)
2713 		goto unlock;
2714 	/* unmapped? It's a hole - nothing to do */
2715 	if (!buffer_mapped(&map_bh))
2716 		goto unlock;
2717 
2718 	/* Ok, it's mapped. Make sure it's up-to-date */
2719 	if (!PageUptodate(page)) {
2720 		err = mapping->a_ops->readpage(NULL, page);
2721 		if (err) {
2722 			page_cache_release(page);
2723 			goto out;
2724 		}
2725 		lock_page(page);
2726 		if (!PageUptodate(page)) {
2727 			err = -EIO;
2728 			goto unlock;
2729 		}
2730 		if (page_has_buffers(page))
2731 			goto has_buffers;
2732 	}
2733 	zero_user(page, offset, length);
2734 	set_page_dirty(page);
2735 	err = 0;
2736 
2737 unlock:
2738 	unlock_page(page);
2739 	page_cache_release(page);
2740 out:
2741 	return err;
2742 }
2743 EXPORT_SYMBOL(nobh_truncate_page);
2744 
2745 int block_truncate_page(struct address_space *mapping,
2746 			loff_t from, get_block_t *get_block)
2747 {
2748 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2749 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2750 	unsigned blocksize;
2751 	sector_t iblock;
2752 	unsigned length, pos;
2753 	struct inode *inode = mapping->host;
2754 	struct page *page;
2755 	struct buffer_head *bh;
2756 	int err;
2757 
2758 	blocksize = 1 << inode->i_blkbits;
2759 	length = offset & (blocksize - 1);
2760 
2761 	/* Block boundary? Nothing to do */
2762 	if (!length)
2763 		return 0;
2764 
2765 	length = blocksize - length;
2766 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2767 
2768 	page = grab_cache_page(mapping, index);
2769 	err = -ENOMEM;
2770 	if (!page)
2771 		goto out;
2772 
2773 	if (!page_has_buffers(page))
2774 		create_empty_buffers(page, blocksize, 0);
2775 
2776 	/* Find the buffer that contains "offset" */
2777 	bh = page_buffers(page);
2778 	pos = blocksize;
2779 	while (offset >= pos) {
2780 		bh = bh->b_this_page;
2781 		iblock++;
2782 		pos += blocksize;
2783 	}
2784 
2785 	err = 0;
2786 	if (!buffer_mapped(bh)) {
2787 		WARN_ON(bh->b_size != blocksize);
2788 		err = get_block(inode, iblock, bh, 0);
2789 		if (err)
2790 			goto unlock;
2791 		/* unmapped? It's a hole - nothing to do */
2792 		if (!buffer_mapped(bh))
2793 			goto unlock;
2794 	}
2795 
2796 	/* Ok, it's mapped. Make sure it's up-to-date */
2797 	if (PageUptodate(page))
2798 		set_buffer_uptodate(bh);
2799 
2800 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2801 		err = -EIO;
2802 		ll_rw_block(READ, 1, &bh);
2803 		wait_on_buffer(bh);
2804 		/* Uhhuh. Read error. Complain and punt. */
2805 		if (!buffer_uptodate(bh))
2806 			goto unlock;
2807 	}
2808 
2809 	zero_user(page, offset, length);
2810 	mark_buffer_dirty(bh);
2811 	err = 0;
2812 
2813 unlock:
2814 	unlock_page(page);
2815 	page_cache_release(page);
2816 out:
2817 	return err;
2818 }
2819 EXPORT_SYMBOL(block_truncate_page);
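
/*
 * Illustrative sketch, not part of this file: filesystems typically call this
 * from their truncate path, before the size is actually reduced, to zero the
 * tail of the block containing the new end of file.  newsize and
 * foo_get_block() are hypothetical.
 *
 *		err = block_truncate_page(inode->i_mapping, newsize,
 *					  foo_get_block);
 *		if (err)
 *			return err;
 */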
2820 
2821 /*
2822  * The generic ->writepage function for buffer-backed address_spaces
2823  * this form passes in the end_io handler used to finish the IO.
2824  */
2825 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2826 			struct writeback_control *wbc, bh_end_io_t *handler)
2827 {
2828 	struct inode * const inode = page->mapping->host;
2829 	loff_t i_size = i_size_read(inode);
2830 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2831 	unsigned offset;
2832 
2833 	/* Is the page fully inside i_size? */
2834 	if (page->index < end_index)
2835 		return __block_write_full_page(inode, page, get_block, wbc,
2836 					       handler);
2837 
2838 	/* Is the page fully outside i_size? (truncate in progress) */
2839 	offset = i_size & (PAGE_CACHE_SIZE-1);
2840 	if (page->index >= end_index+1 || !offset) {
2841 		/*
2842 		 * The page may have dirty, unmapped buffers.  For example,
2843 		 * they may have been added in ext3_writepage().  Make them
2844 		 * freeable here, so the page does not leak.
2845 		 */
2846 		do_invalidatepage(page, 0);
2847 		unlock_page(page);
2848 		return 0; /* don't care */
2849 	}
2850 
2851 	/*
2852 	 * The page straddles i_size.  It must be zeroed out on each and every
2853 	 * writepage invocation because it may be mmapped.  "A file is mapped
2854 	 * in multiples of the page size.  For a file that is not a multiple of
2855 	 * the  page size, the remaining memory is zeroed when mapped, and
2856 	 * writes to that region are not written out to the file."
2857 	 */
2858 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2859 	return __block_write_full_page(inode, page, get_block, wbc, handler);
2860 }
2861 EXPORT_SYMBOL(block_write_full_page_endio);
2862 
2863 /*
2864  * The generic ->writepage function for buffer-backed address_spaces
2865  */
2866 int block_write_full_page(struct page *page, get_block_t *get_block,
2867 			struct writeback_control *wbc)
2868 {
2869 	return block_write_full_page_endio(page, get_block, wbc,
2870 					   end_buffer_async_write);
2871 }
2872 EXPORT_SYMBOL(block_write_full_page);
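
/*
 * Illustrative sketch, not part of this file: the matching ->writepage()
 * wrapper.  foo_writepage() and foo_get_block() are hypothetical names.
 *
 *	static int foo_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, foo_get_block, wbc);
 *	}
 */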
2873 
2874 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2875 			    get_block_t *get_block)
2876 {
2877 	struct buffer_head tmp;
2878 	struct inode *inode = mapping->host;
2879 	tmp.b_state = 0;
2880 	tmp.b_blocknr = 0;
2881 	tmp.b_size = 1 << inode->i_blkbits;
2882 	get_block(inode, block, &tmp, 0);
2883 	return tmp.b_blocknr;
2884 }
2885 EXPORT_SYMBOL(generic_block_bmap);
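
/*
 * Illustrative sketch, not part of this file: ->bmap() is usually just this
 * helper invoked with the filesystem's get_block routine.  foo_bmap() and
 * foo_get_block() are hypothetical names.
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, foo_get_block);
 *	}
 */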
2886 
2887 static void end_bio_bh_io_sync(struct bio *bio, int err)
2888 {
2889 	struct buffer_head *bh = bio->bi_private;
2890 
2891 	if (err == -EOPNOTSUPP) {
2892 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2893 	}
2894 
2895 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2896 		set_bit(BH_Quiet, &bh->b_state);
2897 
2898 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2899 	bio_put(bio);
2900 }
2901 
2902 /*
2903  * This allows us to do IO even on the odd last sectors
2904  * of a device, even if the bh block size is some multiple
2905  * of the physical sector size.
2906  *
2907  * We'll just truncate the bio to the size of the device,
2908  * and clear the end of the buffer head manually.
2909  *
2910  * Truly out-of-range accesses will turn into actual IO
2911  * errors, this only handles the "we need to be able to
2912  * do IO at the final sector" case.
2913  */
2914 static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
2915 {
2916 	sector_t maxsector;
2917 	unsigned bytes;
2918 
2919 	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
2920 	if (!maxsector)
2921 		return;
2922 
2923 	/*
2924 	 * If the *whole* IO is past the end of the device,
2925 	 * let it through, and the IO layer will turn it into
2926 	 * an EIO.
2927 	 */
2928 	if (unlikely(bio->bi_sector >= maxsector))
2929 		return;
2930 
2931 	maxsector -= bio->bi_sector;
2932 	bytes = bio->bi_size;
2933 	if (likely((bytes >> 9) <= maxsector))
2934 		return;
2935 
2936 	/* Uhhuh. We've got a bh that straddles the device size! */
2937 	bytes = maxsector << 9;
2938 
2939 	/* Truncate the bio.. */
2940 	bio->bi_size = bytes;
2941 	bio->bi_io_vec[0].bv_len = bytes;
2942 
2943 	/* ..and clear the end of the buffer for reads */
2944 	if ((rw & RW_MASK) == READ) {
2945 		void *kaddr = kmap_atomic(bh->b_page);
2946 		memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
2947 		kunmap_atomic(kaddr);
2948 		flush_dcache_page(bh->b_page);
2949 	}
2950 }
2951 
2952 int submit_bh(int rw, struct buffer_head * bh)
2953 {
2954 	struct bio *bio;
2955 	int ret = 0;
2956 
2957 	BUG_ON(!buffer_locked(bh));
2958 	BUG_ON(!buffer_mapped(bh));
2959 	BUG_ON(!bh->b_end_io);
2960 	BUG_ON(buffer_delay(bh));
2961 	BUG_ON(buffer_unwritten(bh));
2962 
2963 	/*
2964 	 * Only clear out a write error when rewriting
2965 	 */
2966 	if (test_set_buffer_req(bh) && (rw & WRITE))
2967 		clear_buffer_write_io_error(bh);
2968 
2969 	/*
2970 	 * from here on down, it's all bio -- do the initial mapping,
2971 	 * submit_bio -> generic_make_request may further map this bio around
2972 	 */
2973 	bio = bio_alloc(GFP_NOIO, 1);
2974 
2975 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2976 	bio->bi_bdev = bh->b_bdev;
2977 	bio->bi_io_vec[0].bv_page = bh->b_page;
2978 	bio->bi_io_vec[0].bv_len = bh->b_size;
2979 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2980 
2981 	bio->bi_vcnt = 1;
2982 	bio->bi_idx = 0;
2983 	bio->bi_size = bh->b_size;
2984 
2985 	bio->bi_end_io = end_bio_bh_io_sync;
2986 	bio->bi_private = bh;
2987 
2988 	/* Take care of bh's that straddle the end of the device */
2989 	guard_bh_eod(rw, bio, bh);
2990 
2991 	bio_get(bio);
2992 	submit_bio(rw, bio);
2993 
2994 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2995 		ret = -EOPNOTSUPP;
2996 
2997 	bio_put(bio);
2998 	return ret;
2999 }
3000 EXPORT_SYMBOL(submit_bh);
3001 
3002 /**
3003  * ll_rw_block: low-level access to block devices (DEPRECATED)
3004  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
3005  * @nr: number of &struct buffer_heads in the array
3006  * @bhs: array of pointers to &struct buffer_head
3007  *
3008  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3009  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
3010  * %READA option is described in the documentation for generic_make_request()
3011  * which ll_rw_block() calls.
3012  *
3013  * This function drops any buffer that it cannot get a lock on (with the
3014  * BH_Lock state bit), any buffer that appears to be clean when doing a write
3015  * request, and any buffer that appears to be up-to-date when doing a read
3016  * request.  Further it marks as clean buffers that are processed for
3017  * writing (the buffer cache won't assume that they are actually clean
3018  * until the buffer gets unlocked).
3019  *
3020  * ll_rw_block sets b_end_io to a simple completion handler that marks
3021  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3022  * any waiters.
3023  *
3024  * All of the buffers must be for the same device, and must also be a
3025  * multiple of the current approved size for the device.
3026  */
3027 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3028 {
3029 	int i;
3030 
3031 	for (i = 0; i < nr; i++) {
3032 		struct buffer_head *bh = bhs[i];
3033 
3034 		if (!trylock_buffer(bh))
3035 			continue;
3036 		if (rw == WRITE) {
3037 			if (test_clear_buffer_dirty(bh)) {
3038 				bh->b_end_io = end_buffer_write_sync;
3039 				get_bh(bh);
3040 				submit_bh(WRITE, bh);
3041 				continue;
3042 			}
3043 		} else {
3044 			if (!buffer_uptodate(bh)) {
3045 				bh->b_end_io = end_buffer_read_sync;
3046 				get_bh(bh);
3047 				submit_bh(rw, bh);
3048 				continue;
3049 			}
3050 		}
3051 		unlock_buffer(bh);
3052 	}
3053 }
3054 EXPORT_SYMBOL(ll_rw_block);
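
/*
 * Illustrative sketch, not part of this file: the common "start a read, then
 * wait and check" pattern, assuming bh was obtained elsewhere (e.g. from
 * sb_getblk()).
 *
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh)) {
 *			brelse(bh);
 *			return -EIO;
 *		}
 */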
3055 
3056 void write_dirty_buffer(struct buffer_head *bh, int rw)
3057 {
3058 	lock_buffer(bh);
3059 	if (!test_clear_buffer_dirty(bh)) {
3060 		unlock_buffer(bh);
3061 		return;
3062 	}
3063 	bh->b_end_io = end_buffer_write_sync;
3064 	get_bh(bh);
3065 	submit_bh(rw, bh);
3066 }
3067 EXPORT_SYMBOL(write_dirty_buffer);
3068 
3069 /*
3070  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3071  * and then start new I/O and then wait upon it.  The caller must have a ref on
3072  * the buffer_head.
3073  */
3074 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3075 {
3076 	int ret = 0;
3077 
3078 	WARN_ON(atomic_read(&bh->b_count) < 1);
3079 	lock_buffer(bh);
3080 	if (test_clear_buffer_dirty(bh)) {
3081 		get_bh(bh);
3082 		bh->b_end_io = end_buffer_write_sync;
3083 		ret = submit_bh(rw, bh);
3084 		wait_on_buffer(bh);
3085 		if (!ret && !buffer_uptodate(bh))
3086 			ret = -EIO;
3087 	} else {
3088 		unlock_buffer(bh);
3089 	}
3090 	return ret;
3091 }
3092 EXPORT_SYMBOL(__sync_dirty_buffer);
3093 
3094 int sync_dirty_buffer(struct buffer_head *bh)
3095 {
3096 	return __sync_dirty_buffer(bh, WRITE_SYNC);
3097 }
3098 EXPORT_SYMBOL(sync_dirty_buffer);
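
/*
 * Illustrative sketch, not part of this file: after modifying on-disk
 * metadata through bh->b_data, a caller that wants the buffer written out
 * and the result known before proceeding can do the following (err is a
 * local int).
 *
 *		mark_buffer_dirty(bh);
 *		err = sync_dirty_buffer(bh);
 *		if (err)
 *			return err;
 */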
3099 
3100 /*
3101  * try_to_free_buffers() checks if all the buffers on this particular page
3102  * are unused, and releases them if so.
3103  *
3104  * Exclusion against try_to_free_buffers may be obtained by either
3105  * locking the page or by holding its mapping's private_lock.
3106  *
3107  * If the page is dirty but all the buffers are clean then we need to
3108  * be sure to mark the page clean as well.  This is because the page
3109  * may be against a block device, and a later reattachment of buffers
3110  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3111  * filesystem data on the same device.
3112  *
3113  * The same applies to regular filesystem pages: if all the buffers are
3114  * clean then we set the page clean and proceed.  To do that, we require
3115  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3116  * private_lock.
3117  *
3118  * try_to_free_buffers() is non-blocking.
3119  */
3120 static inline int buffer_busy(struct buffer_head *bh)
3121 {
3122 	return atomic_read(&bh->b_count) |
3123 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3124 }
3125 
3126 static int
3127 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3128 {
3129 	struct buffer_head *head = page_buffers(page);
3130 	struct buffer_head *bh;
3131 
3132 	bh = head;
3133 	do {
3134 		if (buffer_write_io_error(bh) && page->mapping)
3135 			set_bit(AS_EIO, &page->mapping->flags);
3136 		if (buffer_busy(bh))
3137 			goto failed;
3138 		bh = bh->b_this_page;
3139 	} while (bh != head);
3140 
3141 	do {
3142 		struct buffer_head *next = bh->b_this_page;
3143 
3144 		if (bh->b_assoc_map)
3145 			__remove_assoc_queue(bh);
3146 		bh = next;
3147 	} while (bh != head);
3148 	*buffers_to_free = head;
3149 	__clear_page_buffers(page);
3150 	return 1;
3151 failed:
3152 	return 0;
3153 }
3154 
3155 int try_to_free_buffers(struct page *page)
3156 {
3157 	struct address_space * const mapping = page->mapping;
3158 	struct buffer_head *buffers_to_free = NULL;
3159 	int ret = 0;
3160 
3161 	BUG_ON(!PageLocked(page));
3162 	if (PageWriteback(page))
3163 		return 0;
3164 
3165 	if (mapping == NULL) {		/* can this still happen? */
3166 		ret = drop_buffers(page, &buffers_to_free);
3167 		goto out;
3168 	}
3169 
3170 	spin_lock(&mapping->private_lock);
3171 	ret = drop_buffers(page, &buffers_to_free);
3172 
3173 	/*
3174 	 * If the filesystem writes its buffers by hand (eg ext3)
3175 	 * then we can have clean buffers against a dirty page.  We
3176 	 * clean the page here; otherwise the VM will never notice
3177 	 * that the filesystem did any IO at all.
3178 	 *
3179 	 * Also, during truncate, discard_buffer will have marked all
3180 	 * the page's buffers clean.  We discover that here and clean
3181 	 * the page also.
3182 	 *
3183 	 * private_lock must be held over this entire operation in order
3184 	 * to synchronise against __set_page_dirty_buffers and prevent the
3185 	 * dirty bit from being lost.
3186 	 */
3187 	if (ret)
3188 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3189 	spin_unlock(&mapping->private_lock);
3190 out:
3191 	if (buffers_to_free) {
3192 		struct buffer_head *bh = buffers_to_free;
3193 
3194 		do {
3195 			struct buffer_head *next = bh->b_this_page;
3196 			free_buffer_head(bh);
3197 			bh = next;
3198 		} while (bh != buffers_to_free);
3199 	}
3200 	return ret;
3201 }
3202 EXPORT_SYMBOL(try_to_free_buffers);
3203 
3204 /*
3205  * There are no bdflush tunables left.  But distributions are
3206  * still running obsolete flush daemons, so we terminate them here.
3207  *
3208  * Use of bdflush() is deprecated and will be removed in a future kernel.
3209  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3210  */
3211 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3212 {
3213 	static int msg_count;
3214 
3215 	if (!capable(CAP_SYS_ADMIN))
3216 		return -EPERM;
3217 
3218 	if (msg_count < 5) {
3219 		msg_count++;
3220 		printk(KERN_INFO
3221 			"warning: process `%s' used the obsolete bdflush"
3222 			" system call\n", current->comm);
3223 		printk(KERN_INFO "Fix your initscripts?\n");
3224 	}
3225 
3226 	if (func == 1)
3227 		do_exit(0);
3228 	return 0;
3229 }
3230 
3231 /*
3232  * Buffer-head allocation
3233  */
3234 static struct kmem_cache *bh_cachep __read_mostly;
3235 
3236 /*
3237  * Once the number of bh's in the machine exceeds this level, we start
3238  * stripping them in writeback.
3239  */
3240 static unsigned long max_buffer_heads;
3241 
3242 int buffer_heads_over_limit;
3243 
3244 struct bh_accounting {
3245 	int nr;			/* Number of live bh's */
3246 	int ratelimit;		/* Limit cacheline bouncing */
3247 };
3248 
3249 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3250 
3251 static void recalc_bh_state(void)
3252 {
3253 	int i;
3254 	int tot = 0;
3255 
3256 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3257 		return;
3258 	__this_cpu_write(bh_accounting.ratelimit, 0);
3259 	for_each_online_cpu(i)
3260 		tot += per_cpu(bh_accounting, i).nr;
3261 	buffer_heads_over_limit = (tot > max_buffer_heads);
3262 }
3263 
3264 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3265 {
3266 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3267 	if (ret) {
3268 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3269 		preempt_disable();
3270 		__this_cpu_inc(bh_accounting.nr);
3271 		recalc_bh_state();
3272 		preempt_enable();
3273 	}
3274 	return ret;
3275 }
3276 EXPORT_SYMBOL(alloc_buffer_head);
3277 
3278 void free_buffer_head(struct buffer_head *bh)
3279 {
3280 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3281 	kmem_cache_free(bh_cachep, bh);
3282 	preempt_disable();
3283 	__this_cpu_dec(bh_accounting.nr);
3284 	recalc_bh_state();
3285 	preempt_enable();
3286 }
3287 EXPORT_SYMBOL(free_buffer_head);
3288 
3289 static void buffer_exit_cpu(int cpu)
3290 {
3291 	int i;
3292 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3293 
3294 	for (i = 0; i < BH_LRU_SIZE; i++) {
3295 		brelse(b->bhs[i]);
3296 		b->bhs[i] = NULL;
3297 	}
3298 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3299 	per_cpu(bh_accounting, cpu).nr = 0;
3300 }
3301 
3302 static int buffer_cpu_notify(struct notifier_block *self,
3303 			      unsigned long action, void *hcpu)
3304 {
3305 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3306 		buffer_exit_cpu((unsigned long)hcpu);
3307 	return NOTIFY_OK;
3308 }
3309 
3310 /**
3311  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3312  * @bh: struct buffer_head
3313  *
3314  * Return true if the buffer is up-to-date and false,
3315  * with the buffer locked, if not.
3316  */
3317 int bh_uptodate_or_lock(struct buffer_head *bh)
3318 {
3319 	if (!buffer_uptodate(bh)) {
3320 		lock_buffer(bh);
3321 		if (!buffer_uptodate(bh))
3322 			return 0;
3323 		unlock_buffer(bh);
3324 	}
3325 	return 1;
3326 }
3327 EXPORT_SYMBOL(bh_uptodate_or_lock);
3328 
3329 /**
3330  * bh_submit_read - Submit a locked buffer for reading
3331  * @bh: struct buffer_head
3332  *
3333  * Returns zero on success and -EIO on error.
3334  */
3335 int bh_submit_read(struct buffer_head *bh)
3336 {
3337 	BUG_ON(!buffer_locked(bh));
3338 
3339 	if (buffer_uptodate(bh)) {
3340 		unlock_buffer(bh);
3341 		return 0;
3342 	}
3343 
3344 	get_bh(bh);
3345 	bh->b_end_io = end_buffer_read_sync;
3346 	submit_bh(READ, bh);
3347 	wait_on_buffer(bh);
3348 	if (buffer_uptodate(bh))
3349 		return 0;
3350 	return -EIO;
3351 }
3352 EXPORT_SYMBOL(bh_submit_read);
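
/*
 * Illustrative sketch, not part of this file: bh_uptodate_or_lock() and
 * bh_submit_read() are meant to be used as a pair, reading the buffer only
 * if it is not already uptodate; on success the buffer ends up uptodate and
 * unlocked.
 *
 *		if (!bh_uptodate_or_lock(bh)) {
 *			err = bh_submit_read(bh);
 *			if (err)
 *				return err;
 *		}
 */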
3353 
3354 void __init buffer_init(void)
3355 {
3356 	unsigned long nrpages;
3357 
3358 	bh_cachep = kmem_cache_create("buffer_head",
3359 			sizeof(struct buffer_head), 0,
3360 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3361 				SLAB_MEM_SPREAD),
3362 				NULL);
3363 
3364 	/*
3365 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3366 	 */
3367 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3368 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3369 	hotcpu_notifier(buffer_cpu_notify, 0);
3370 }
3371