/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required by older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
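
/*
 * Illustrative sketch (not part of the original file): the
 * lock-it-yourself pattern the comment above refers to.  Unlike a bare
 * wait_on_buffer(), holding the buffer lock guarantees nobody else
 * relocks the buffer while we examine or modify it.
 */
static void __maybe_unused example_with_buffer_locked(struct buffer_head *bh)
{
	lock_buffer(bh);	/* sleeps in __lock_buffer() if contended */
	/* exclusive: bh state and bh->b_data are stable here */
	unlock_buffer(bh);	/* wakes waiters via wake_up_bit() */
}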

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
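
/*
 * Illustrative sketch (assumption, not from this file): the canonical
 * synchronous-read pattern built on end_buffer_read_sync(), mirroring
 * what __bread_slow() does further down in this file.
 */
static int __maybe_unused example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}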

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk while
   there was still dirty data not synced to disk (due to a bug in the
   device driver, or to user error), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted.  Thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk).  Also, for a ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy the dirty
   buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O
   completion) and then an invalidate_buffers call that doesn't trash
   dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update'
   case has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}
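
/*
 * Illustrative sketch (assumption, not from this file): the "normal
 * usage" described in the comment above - sync the device first, then
 * invalidate, so no dirty buffer is ever trashed.
 */
static void __maybe_unused example_sync_then_invalidate(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* write out and wait on dirty pages */
	invalidate_bdev(bdev);	/* now only clean pages are dropped */
}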

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_pdflush(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write_endio(struct buffer_head *bh,
				   bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
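
/*
 * Illustrative sketch (assumption, not from this file): a minimal
 * ->fsync() for a buffer-backed filesystem built on the helpers
 * described above.  The function name is hypothetical.
 */
static int __maybe_unused example_fsync(struct file *file,
					struct dentry *dentry, int datasync)
{
	/*
	 * Write out and wait upon the metadata buffers previously queued
	 * on ->private_list via mark_buffer_dirty_inode().
	 */
	return sync_mapping_buffers(dentry->d_inode->i_mapping);
}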

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

void do_thaw_all(struct work_struct *work)
{
	struct super_block *sb;
	char b[BDEVNAME_SIZE];

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
			printk(KERN_WARNING "Emergency Thaw on %s\n",
			       bdevname(sb->s_bdev, b));
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
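
/*
 * Illustrative sketch (assumption, not from this file): the producer
 * side of the private_list machinery - an ext2-like filesystem updating
 * an indirect block and queueing it for a later fsync().  The sb_bread()
 * usage and the __le32 entry layout are assumptions for illustration.
 */
static void __maybe_unused example_dirty_indirect(struct inode *inode,
						  sector_t block, __le32 val)
{
	struct buffer_head *bh = sb_bread(inode->i_sb, block);

	if (!bh)
		return;
	*(__le32 *)bh->b_data = val;		/* hypothetical update */
	mark_buffer_dirty_inode(bh, inode);	/* queue on ->private_list */
	brelse(bh);
}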

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping, *prev_mapping = NULL;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				if (prev_mapping && prev_mapping != mapping)
					blk_run_address_space(prev_mapping);
				prev_mapping = mapping;

				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);
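	/*
	 * Worked example (illustration): with 512-byte blocks and 4K
	 * pages, sizebits == 3, so block 35 lands in page index
	 * 35 >> 3 == 4, whose first block is 4 << 3 == 32.
	 */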

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be a multiple of the hard sector size */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page))
			__set_page_dirty(page, page_mapping(page), 0);
	}
}
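
/*
 * Illustrative sketch (assumption, not from this file): the typical
 * modify-then-dirty sequence for a metadata buffer.
 */
static void __maybe_unused example_update_block(struct buffer_head *bh,
						const void *data, size_t len)
{
	lock_buffer(bh);
	memcpy(bh->b_data, data, len);	/* caller guarantees len <= b_size */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* dirties the bh and its page */
}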

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-CPU buffer LRU implementation, to reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
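
/*
 * Illustrative sketch (assumption, not from this file): reading one
 * block synchronously with __bread().  The block number and size are
 * hypothetical; brelse() drops the reference __bread() returned.
 */
static int __maybe_unused example_read_block(struct block_device *bdev)
{
	struct buffer_head *bh = __bread(bdev, 1, 1024);

	if (!bh)
		return -EIO;		/* unreadable block */
	/* bh->b_data now holds the 1024 bytes of block 1 */
	brelse(bh);
	return 0;
}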

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);
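
/*
 * Illustrative sketch (assumption, not from this file): buffer-backed
 * filesystems commonly point their ->invalidatepage at a thin wrapper
 * like this one.
 */
static void __maybe_unused example_invalidatepage(struct page *page,
						  unsigned long offset)
{
	block_invalidatepage(page, offset);
}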

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from the return of this function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */
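
/*
 * Illustrative sketch (assumption, not from this file): a hypothetical
 * helper naming the four states from the table above.
 */
static const char * __maybe_unused example_bh_state_name(struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? "hole" : "unknown";
	return buffer_uptodate(bh) ? "valid" : "allocated";
}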

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
 * causes the writes to be flagged as synchronous writes, but the
 * block device queue will NOT be unplugged, since usually many pages
 * will be pushed out before the higher-level caller actually
 * waits for the writes to be completed.  The various wait functions,
 * such as wait_on_writeback_range() will ultimately call sync_page()
 * which will ultimately call blk_run_backing_dev(), which will end up
 * unplugging the device queue.
 */
1623 static int __block_write_full_page(struct inode *inode, struct page *page,
1624 			get_block_t *get_block, struct writeback_control *wbc,
1625 			bh_end_io_t *handler)
1626 {
1627 	int err;
1628 	sector_t block;
1629 	sector_t last_block;
1630 	struct buffer_head *bh, *head;
1631 	const unsigned blocksize = 1 << inode->i_blkbits;
1632 	int nr_underway = 0;
1633 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1634 			WRITE_SYNC_PLUG : WRITE);
1635 
1636 	BUG_ON(!PageLocked(page));
1637 
1638 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1639 
1640 	if (!page_has_buffers(page)) {
1641 		create_empty_buffers(page, blocksize,
1642 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1643 	}
1644 
1645 	/*
1646 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1647 	 * here, and the (potentially unmapped) buffers may become dirty at
1648 	 * any time.  If a buffer becomes dirty here after we've inspected it
1649 	 * then we just miss that fact, and the page stays dirty.
1650 	 *
1651 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1652 	 * handle that here by just cleaning them.
1653 	 */
1654 
1655 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1656 	head = page_buffers(page);
1657 	bh = head;
1658 
1659 	/*
1660 	 * Get all the dirty buffers mapped to disk addresses and
1661 	 * handle any aliases from the underlying blockdev's mapping.
1662 	 */
1663 	do {
1664 		if (block > last_block) {
1665 			/*
1666 			 * mapped buffers outside i_size will occur, because
1667 			 * this page can be outside i_size when there is a
1668 			 * truncate in progress.
1669 			 */
1670 			/*
1671 			 * The buffer was zeroed by block_write_full_page()
1672 			 */
1673 			clear_buffer_dirty(bh);
1674 			set_buffer_uptodate(bh);
1675 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1676 			   buffer_dirty(bh)) {
1677 			WARN_ON(bh->b_size != blocksize);
1678 			err = get_block(inode, block, bh, 1);
1679 			if (err)
1680 				goto recover;
1681 			clear_buffer_delay(bh);
1682 			if (buffer_new(bh)) {
1683 				/* blockdev mappings never come here */
1684 				clear_buffer_new(bh);
1685 				unmap_underlying_metadata(bh->b_bdev,
1686 							bh->b_blocknr);
1687 			}
1688 		}
1689 		bh = bh->b_this_page;
1690 		block++;
1691 	} while (bh != head);
1692 
1693 	do {
1694 		if (!buffer_mapped(bh))
1695 			continue;
1696 		/*
1697 		 * If it's a fully non-blocking write attempt and we cannot
1698 		 * lock the buffer then redirty the page.  Note that this can
1699 		 * potentially cause a busy-wait loop from pdflush and kswapd
1700 		 * activity, but those code paths have their own higher-level
1701 		 * throttling.
1702 		 */
1703 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1704 			lock_buffer(bh);
1705 		} else if (!trylock_buffer(bh)) {
1706 			redirty_page_for_writepage(wbc, page);
1707 			continue;
1708 		}
1709 		if (test_clear_buffer_dirty(bh)) {
1710 			mark_buffer_async_write_endio(bh, handler);
1711 		} else {
1712 			unlock_buffer(bh);
1713 		}
1714 	} while ((bh = bh->b_this_page) != head);
1715 
1716 	/*
1717 	 * The page and its buffers are protected by PageWriteback(), so we can
1718 	 * drop the bh refcounts early.
1719 	 */
1720 	BUG_ON(PageWriteback(page));
1721 	set_page_writeback(page);
1722 
1723 	do {
1724 		struct buffer_head *next = bh->b_this_page;
1725 		if (buffer_async_write(bh)) {
1726 			submit_bh(write_op, bh);
1727 			nr_underway++;
1728 		}
1729 		bh = next;
1730 	} while (bh != head);
1731 	unlock_page(page);
1732 
1733 	err = 0;
1734 done:
1735 	if (nr_underway == 0) {
1736 		/*
1737 		 * The page was marked dirty, but the buffers were
1738 		 * clean.  Someone wrote them back by hand with
1739 		 * ll_rw_block/submit_bh.  A rare case.
1740 		 */
1741 		end_page_writeback(page);
1742 
1743 		/*
1744 		 * The page and buffer_heads can be released at any time from
1745 		 * here on.
1746 		 */
1747 	}
1748 	return err;
1749 
1750 recover:
1751 	/*
1752 	 * ENOSPC, or some other error.  We may already have added some
1753 	 * blocks to the file, so we need to write these out to avoid
1754 	 * exposing stale data.
1755 	 * The page is currently locked and not marked for writeback
1756 	 */
1757 	bh = head;
1758 	/* Recovery: lock and submit the mapped buffers */
1759 	do {
1760 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1761 		    !buffer_delay(bh)) {
1762 			lock_buffer(bh);
1763 			mark_buffer_async_write_endio(bh, handler);
1764 		} else {
1765 			/*
1766 			 * The buffer may have been set dirty during
1767 			 * attachment to a dirty page.
1768 			 */
1769 			clear_buffer_dirty(bh);
1770 		}
1771 	} while ((bh = bh->b_this_page) != head);
1772 	SetPageError(page);
1773 	BUG_ON(PageWriteback(page));
1774 	mapping_set_error(page->mapping, err);
1775 	set_page_writeback(page);
1776 	do {
1777 		struct buffer_head *next = bh->b_this_page;
1778 		if (buffer_async_write(bh)) {
1779 			clear_buffer_dirty(bh);
1780 			submit_bh(write_op, bh);
1781 			nr_underway++;
1782 		}
1783 		bh = next;
1784 	} while (bh != head);
1785 	unlock_page(page);
1786 	goto done;
1787 }
1788 
1789 /*
1790  * If a page has any new buffers, zero them out here, and mark them uptodate
1791  * and dirty so they'll be written out (in order to prevent uninitialised
1792  * block data from leaking). And clear the new bit.
1793  */
1794 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1795 {
1796 	unsigned int block_start, block_end;
1797 	struct buffer_head *head, *bh;
1798 
1799 	BUG_ON(!PageLocked(page));
1800 	if (!page_has_buffers(page))
1801 		return;
1802 
1803 	bh = head = page_buffers(page);
1804 	block_start = 0;
1805 	do {
1806 		block_end = block_start + bh->b_size;
1807 
1808 		if (buffer_new(bh)) {
1809 			if (block_end > from && block_start < to) {
1810 				if (!PageUptodate(page)) {
1811 					unsigned start, size;
1812 
1813 					start = max(from, block_start);
1814 					size = min(to, block_end) - start;
1815 
1816 					zero_user(page, start, size);
1817 					set_buffer_uptodate(bh);
1818 				}
1819 
1820 				clear_buffer_new(bh);
1821 				mark_buffer_dirty(bh);
1822 			}
1823 		}
1824 
1825 		block_start = block_end;
1826 		bh = bh->b_this_page;
1827 	} while (bh != head);
1828 }
1829 EXPORT_SYMBOL(page_zero_new_buffers);
1830 
1831 static int __block_prepare_write(struct inode *inode, struct page *page,
1832 		unsigned from, unsigned to, get_block_t *get_block)
1833 {
1834 	unsigned block_start, block_end;
1835 	sector_t block;
1836 	int err = 0;
1837 	unsigned blocksize, bbits;
1838 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1839 
1840 	BUG_ON(!PageLocked(page));
1841 	BUG_ON(from > PAGE_CACHE_SIZE);
1842 	BUG_ON(to > PAGE_CACHE_SIZE);
1843 	BUG_ON(from > to);
1844 
1845 	blocksize = 1 << inode->i_blkbits;
1846 	if (!page_has_buffers(page))
1847 		create_empty_buffers(page, blocksize, 0);
1848 	head = page_buffers(page);
1849 
1850 	bbits = inode->i_blkbits;
1851 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1852 
1853 	for (bh = head, block_start = 0; bh != head || !block_start;
1854 	    block++, block_start = block_end, bh = bh->b_this_page) {
1855 		block_end = block_start + blocksize;
1856 		if (block_end <= from || block_start >= to) {
1857 			if (PageUptodate(page)) {
1858 				if (!buffer_uptodate(bh))
1859 					set_buffer_uptodate(bh);
1860 			}
1861 			continue;
1862 		}
1863 		if (buffer_new(bh))
1864 			clear_buffer_new(bh);
1865 		if (!buffer_mapped(bh)) {
1866 			WARN_ON(bh->b_size != blocksize);
1867 			err = get_block(inode, block, bh, 1);
1868 			if (err)
1869 				break;
1870 			if (buffer_new(bh)) {
1871 				unmap_underlying_metadata(bh->b_bdev,
1872 							bh->b_blocknr);
1873 				if (PageUptodate(page)) {
1874 					clear_buffer_new(bh);
1875 					set_buffer_uptodate(bh);
1876 					mark_buffer_dirty(bh);
1877 					continue;
1878 				}
1879 				if (block_end > to || block_start < from)
1880 					zero_user_segments(page,
1881 						to, block_end,
1882 						block_start, from);
1883 				continue;
1884 			}
1885 		}
1886 		if (PageUptodate(page)) {
1887 			if (!buffer_uptodate(bh))
1888 				set_buffer_uptodate(bh);
1889 			continue;
1890 		}
1891 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1892 		    !buffer_unwritten(bh) &&
1893 		     (block_start < from || block_end > to)) {
1894 			ll_rw_block(READ, 1, &bh);
1895 			*wait_bh++ = bh;
1896 		}
1897 	}
1898 	/*
1899 	 * If we issued read requests - let them complete.
1900 	 */
1901 	while (wait_bh > wait) {
1902 		wait_on_buffer(*--wait_bh);
1903 		if (!buffer_uptodate(*wait_bh))
1904 			err = -EIO;
1905 	}
1906 	if (unlikely(err))
1907 		page_zero_new_buffers(page, from, to);
1908 	return err;
1909 }
1910 
1911 static int __block_commit_write(struct inode *inode, struct page *page,
1912 		unsigned from, unsigned to)
1913 {
1914 	unsigned block_start, block_end;
1915 	int partial = 0;
1916 	unsigned blocksize;
1917 	struct buffer_head *bh, *head;
1918 
1919 	blocksize = 1 << inode->i_blkbits;
1920 
1921 	for (bh = head = page_buffers(page), block_start = 0;
1922 	    bh != head || !block_start;
1923 	    block_start = block_end, bh = bh->b_this_page) {
1924 		block_end = block_start + blocksize;
1925 		if (block_end <= from || block_start >= to) {
1926 			if (!buffer_uptodate(bh))
1927 				partial = 1;
1928 		} else {
1929 			set_buffer_uptodate(bh);
1930 			mark_buffer_dirty(bh);
1931 		}
1932 		clear_buffer_new(bh);
1933 	}
1934 
1935 	/*
1936 	 * If this is a partial write which happened to make all buffers
1937 	 * uptodate then we can optimize away a bogus readpage() for
1938 	 * the next read(). Here we 'discover' whether the page went
1939 	 * uptodate as a result of this (potentially partial) write.
1940 	 */
1941 	if (!partial)
1942 		SetPageUptodate(page);
1943 	return 0;
1944 }
1945 
1946 /*
1947  * block_write_begin takes care of the basic task of block allocation and
1948  * bringing the blocks of a partial write uptodate first.
1949  *
1950  * If *pagep is not NULL, then block_write_begin uses the locked page
1951  * at *pagep rather than allocating its own. In this case, the page will
1952  * not be unlocked or deallocated on failure.
1953  */
1954 int block_write_begin(struct file *file, struct address_space *mapping,
1955 			loff_t pos, unsigned len, unsigned flags,
1956 			struct page **pagep, void **fsdata,
1957 			get_block_t *get_block)
1958 {
1959 	struct inode *inode = mapping->host;
1960 	int status = 0;
1961 	struct page *page;
1962 	pgoff_t index;
1963 	unsigned start, end;
1964 	int ownpage = 0;
1965 
1966 	index = pos >> PAGE_CACHE_SHIFT;
1967 	start = pos & (PAGE_CACHE_SIZE - 1);
1968 	end = start + len;
1969 
1970 	page = *pagep;
1971 	if (page == NULL) {
1972 		ownpage = 1;
1973 		page = grab_cache_page_write_begin(mapping, index, flags);
1974 		if (!page) {
1975 			status = -ENOMEM;
1976 			goto out;
1977 		}
1978 		*pagep = page;
1979 	} else
1980 		BUG_ON(!PageLocked(page));
1981 
1982 	status = __block_prepare_write(inode, page, start, end, get_block);
1983 	if (unlikely(status)) {
1984 		ClearPageUptodate(page);
1985 
1986 		if (ownpage) {
1987 			unlock_page(page);
1988 			page_cache_release(page);
1989 			*pagep = NULL;
1990 
1991 			/*
1992 			 * prepare_write() may have instantiated a few blocks
1993 			 * outside i_size.  Trim these off again. Don't need
1994 			 * i_size_read because we hold i_mutex.
1995 			 */
1996 			if (pos + len > inode->i_size)
1997 				vmtruncate(inode, inode->i_size);
1998 		}
1999 	}
2000 
2001 out:
2002 	return status;
2003 }
2004 EXPORT_SYMBOL(block_write_begin);
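
/*
 * Illustrative sketch (not part of this file): a minimal ->write_begin for a
 * hypothetical "myfs" filesystem built on block_write_begin().  The
 * myfs_get_block() helper is assumed to exist and fill in the block mapping.
 */
#if 0
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin() grab and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}
#endif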
2005 
2006 int block_write_end(struct file *file, struct address_space *mapping,
2007 			loff_t pos, unsigned len, unsigned copied,
2008 			struct page *page, void *fsdata)
2009 {
2010 	struct inode *inode = mapping->host;
2011 	unsigned start;
2012 
2013 	start = pos & (PAGE_CACHE_SIZE - 1);
2014 
2015 	if (unlikely(copied < len)) {
2016 		/*
2017 		 * The buffers that were written will now be uptodate, so we
2018 		 * don't have to worry about a readpage reading them and
2019 		 * overwriting a partial write. However if we have encountered
2020 		 * a short write and only partially written into a buffer, it
2021 		 * will not be marked uptodate, so a readpage might come in and
2022 		 * destroy our partial write.
2023 		 *
2024 		 * Do the simplest thing, and just treat any short write to a
2025 		 * non uptodate page as a zero-length write, and force the
2026 		 * caller to redo the whole thing.
2027 		 */
2028 		if (!PageUptodate(page))
2029 			copied = 0;
2030 
2031 		page_zero_new_buffers(page, start+copied, start+len);
2032 	}
2033 	flush_dcache_page(page);
2034 
2035 	/* This could be a short (even 0-length) commit */
2036 	__block_commit_write(inode, page, start, start+copied);
2037 
2038 	return copied;
2039 }
2040 EXPORT_SYMBOL(block_write_end);
2041 
2042 int generic_write_end(struct file *file, struct address_space *mapping,
2043 			loff_t pos, unsigned len, unsigned copied,
2044 			struct page *page, void *fsdata)
2045 {
2046 	struct inode *inode = mapping->host;
2047 	int i_size_changed = 0;
2048 
2049 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2050 
2051 	/*
2052 	 * No need to use i_size_read() here, the i_size
2053 	 * cannot change under us because we hold i_mutex.
2054 	 *
2055 	 * But it's important to update i_size while still holding page lock:
2056 	 * page writeout could otherwise come in and zero beyond i_size.
2057 	 */
2058 	if (pos+copied > inode->i_size) {
2059 		i_size_write(inode, pos+copied);
2060 		i_size_changed = 1;
2061 	}
2062 
2063 	unlock_page(page);
2064 	page_cache_release(page);
2065 
2066 	/*
2067 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2068 	 * makes the holding time of page lock longer. Second, it forces lock
2069 	 * ordering of page lock and transaction start for journaling
2070 	 * filesystems.
2071 	 */
2072 	if (i_size_changed)
2073 		mark_inode_dirty(inode);
2074 
2075 	return copied;
2076 }
2077 EXPORT_SYMBOL(generic_write_end);
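
/*
 * Illustrative sketch: how the helpers above are typically wired into an
 * address_space_operations table.  All myfs_* entries are hypothetical;
 * the one-line wrappers are sketched next to the corresponding helpers
 * further down in this file.
 */
#if 0
static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,	/* block_read_full_page() */
	.writepage	= myfs_writepage,	/* block_write_full_page() */
	.write_begin	= myfs_write_begin,	/* block_write_begin() */
	.write_end	= generic_write_end,
	.bmap		= myfs_bmap,		/* generic_block_bmap() */
};
#endif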
2078 
2079 /*
2080  * block_is_partially_uptodate checks whether buffers within a page are
2081  * uptodate or not.
2082  *
2083  * Returns true if all buffers which correspond to a file portion
2084  * we want to read are uptodate.
2085  */
2086 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2087 					unsigned long from)
2088 {
2089 	struct inode *inode = page->mapping->host;
2090 	unsigned block_start, block_end, blocksize;
2091 	unsigned to;
2092 	struct buffer_head *bh, *head;
2093 	int ret = 1;
2094 
2095 	if (!page_has_buffers(page))
2096 		return 0;
2097 
2098 	blocksize = 1 << inode->i_blkbits;
2099 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2100 	to = from + to;
2101 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2102 		return 0;
2103 
2104 	head = page_buffers(page);
2105 	bh = head;
2106 	block_start = 0;
2107 	do {
2108 		block_end = block_start + blocksize;
2109 		if (block_end > from && block_start < to) {
2110 			if (!buffer_uptodate(bh)) {
2111 				ret = 0;
2112 				break;
2113 			}
2114 			if (block_end >= to)
2115 				break;
2116 		}
2117 		block_start = block_end;
2118 		bh = bh->b_this_page;
2119 	} while (bh != head);
2120 
2121 	return ret;
2122 }
2123 EXPORT_SYMBOL(block_is_partially_uptodate);
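
/*
 * Illustrative fragment: a filesystem opts into this check by pointing its
 * address_space_operations at the helper, e.g. extending the hypothetical
 * myfs_aops sketch shown after generic_write_end():
 */
#if 0
	.is_partially_uptodate	= block_is_partially_uptodate,
#endif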
2124 
2125 /*
2126  * Generic "read page" function for block devices that have the normal
2127  * get_block functionality. This covers most of the block device filesystems.
2128  * Reads the page asynchronously --- the unlock_buffer() and
2129  * set/clear_buffer_uptodate() functions propagate buffer state into the
2130  * page struct once IO has completed.
2131  */
2132 int block_read_full_page(struct page *page, get_block_t *get_block)
2133 {
2134 	struct inode *inode = page->mapping->host;
2135 	sector_t iblock, lblock;
2136 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2137 	unsigned int blocksize;
2138 	int nr, i;
2139 	int fully_mapped = 1;
2140 
2141 	BUG_ON(!PageLocked(page));
2142 	blocksize = 1 << inode->i_blkbits;
2143 	if (!page_has_buffers(page))
2144 		create_empty_buffers(page, blocksize, 0);
2145 	head = page_buffers(page);
2146 
2147 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2148 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2149 	bh = head;
2150 	nr = 0;
2151 	i = 0;
2152 
2153 	do {
2154 		if (buffer_uptodate(bh))
2155 			continue;
2156 
2157 		if (!buffer_mapped(bh)) {
2158 			int err = 0;
2159 
2160 			fully_mapped = 0;
2161 			if (iblock < lblock) {
2162 				WARN_ON(bh->b_size != blocksize);
2163 				err = get_block(inode, iblock, bh, 0);
2164 				if (err)
2165 					SetPageError(page);
2166 			}
2167 			if (!buffer_mapped(bh)) {
2168 				zero_user(page, i * blocksize, blocksize);
2169 				if (!err)
2170 					set_buffer_uptodate(bh);
2171 				continue;
2172 			}
2173 			/*
2174 			 * get_block() might have updated the buffer
2175 			 * synchronously
2176 			 */
2177 			if (buffer_uptodate(bh))
2178 				continue;
2179 		}
2180 		arr[nr++] = bh;
2181 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2182 
2183 	if (fully_mapped)
2184 		SetPageMappedToDisk(page);
2185 
2186 	if (!nr) {
2187 		/*
2188 		 * All buffers are uptodate - we can set the page uptodate
2189 		 * as well. But not if get_block() returned an error.
2190 		 */
2191 		if (!PageError(page))
2192 			SetPageUptodate(page);
2193 		unlock_page(page);
2194 		return 0;
2195 	}
2196 
2197 	/* Stage two: lock the buffers */
2198 	for (i = 0; i < nr; i++) {
2199 		bh = arr[i];
2200 		lock_buffer(bh);
2201 		mark_buffer_async_read(bh);
2202 	}
2203 
2204 	/*
2205 	 * Stage 3: start the IO.  Check for uptodateness
2206 	 * inside the buffer lock in case another process reading
2207 	 * the underlying blockdev brought it uptodate (the sct fix).
2208 	 */
2209 	for (i = 0; i < nr; i++) {
2210 		bh = arr[i];
2211 		if (buffer_uptodate(bh))
2212 			end_buffer_async_read(bh, 1);
2213 		else
2214 			submit_bh(READ, bh);
2215 	}
2216 	return 0;
2217 }
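
/*
 * Illustrative sketch: the usual one-line ->readpage built on the helper
 * above (myfs_get_block is the hypothetical block mapper).
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif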
2218 
2219 /* utility function for filesystems that need to do work on expanding
2220  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2221  * deal with the hole.
2222  */
2223 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2224 {
2225 	struct address_space *mapping = inode->i_mapping;
2226 	struct page *page;
2227 	void *fsdata;
2228 	unsigned long limit;
2229 	int err;
2230 
2231 	err = -EFBIG;
2232 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2233 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2234 		send_sig(SIGXFSZ, current, 0);
2235 		goto out;
2236 	}
2237 	if (size > inode->i_sb->s_maxbytes)
2238 		goto out;
2239 
2240 	err = pagecache_write_begin(NULL, mapping, size, 0,
2241 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2242 				&page, &fsdata);
2243 	if (err)
2244 		goto out;
2245 
2246 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2247 	BUG_ON(err > 0);
2248 
2249 out:
2250 	return err;
2251 }
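
/*
 * Illustrative sketch: a ->setattr that uses the helper above to handle a
 * size-increasing truncate.  myfs_setattr() is hypothetical, and using
 * inode_setattr() for the remaining attributes is an assumption.
 */
#if 0
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size > i_size_read(inode)) {
		int err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}
#endif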
2252 
2253 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2254 			    loff_t pos, loff_t *bytes)
2255 {
2256 	struct inode *inode = mapping->host;
2257 	unsigned blocksize = 1 << inode->i_blkbits;
2258 	struct page *page;
2259 	void *fsdata;
2260 	pgoff_t index, curidx;
2261 	loff_t curpos;
2262 	unsigned zerofrom, offset, len;
2263 	int err = 0;
2264 
2265 	index = pos >> PAGE_CACHE_SHIFT;
2266 	offset = pos & ~PAGE_CACHE_MASK;
2267 
2268 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2269 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2270 		if (zerofrom & (blocksize-1)) {
2271 			*bytes |= (blocksize-1);
2272 			(*bytes)++;
2273 		}
2274 		len = PAGE_CACHE_SIZE - zerofrom;
2275 
2276 		err = pagecache_write_begin(file, mapping, curpos, len,
2277 						AOP_FLAG_UNINTERRUPTIBLE,
2278 						&page, &fsdata);
2279 		if (err)
2280 			goto out;
2281 		zero_user(page, zerofrom, len);
2282 		err = pagecache_write_end(file, mapping, curpos, len, len,
2283 						page, fsdata);
2284 		if (err < 0)
2285 			goto out;
2286 		BUG_ON(err != len);
2287 		err = 0;
2288 
2289 		balance_dirty_pages_ratelimited(mapping);
2290 	}
2291 
2292 	/* page covers the boundary, find the boundary offset */
2293 	if (index == curidx) {
2294 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2295 		/* if we expand the file, the last block will be filled */
2296 		if (offset <= zerofrom) {
2297 			goto out;
2298 		}
2299 		if (zerofrom & (blocksize-1)) {
2300 			*bytes |= (blocksize-1);
2301 			(*bytes)++;
2302 		}
2303 		len = offset - zerofrom;
2304 
2305 		err = pagecache_write_begin(file, mapping, curpos, len,
2306 						AOP_FLAG_UNINTERRUPTIBLE,
2307 						&page, &fsdata);
2308 		if (err)
2309 			goto out;
2310 		zero_user(page, zerofrom, len);
2311 		err = pagecache_write_end(file, mapping, curpos, len, len,
2312 						page, fsdata);
2313 		if (err < 0)
2314 			goto out;
2315 		BUG_ON(err != len);
2316 		err = 0;
2317 	}
2318 out:
2319 	return err;
2320 }
2321 
2322 /*
2323  * For moronic filesystems that do not allow holes in files.
2324  * We may have to extend the file.
2325  */
2326 int cont_write_begin(struct file *file, struct address_space *mapping,
2327 			loff_t pos, unsigned len, unsigned flags,
2328 			struct page **pagep, void **fsdata,
2329 			get_block_t *get_block, loff_t *bytes)
2330 {
2331 	struct inode *inode = mapping->host;
2332 	unsigned blocksize = 1 << inode->i_blkbits;
2333 	unsigned zerofrom;
2334 	int err;
2335 
2336 	err = cont_expand_zero(file, mapping, pos, bytes);
2337 	if (err)
2338 		goto out;
2339 
2340 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2341 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2342 		*bytes |= (blocksize-1);
2343 		(*bytes)++;
2344 	}
2345 
2346 	*pagep = NULL;
2347 	err = block_write_begin(file, mapping, pos, len,
2348 				flags, pagep, fsdata, get_block);
2349 out:
2350 	return err;
2351 }
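
/*
 * Illustrative sketch: a ->write_begin for such a filesystem.  The
 * per-inode mmu_private field (a count of the bytes mapped so far, as
 * kept by FAT-like filesystems) and the MYFS_I() accessor are assumptions.
 */
#if 0
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}
#endif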
2352 
2353 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2354 			get_block_t *get_block)
2355 {
2356 	struct inode *inode = page->mapping->host;
2357 	int err = __block_prepare_write(inode, page, from, to, get_block);
2358 	if (err)
2359 		ClearPageUptodate(page);
2360 	return err;
2361 }
2362 
2363 int block_commit_write(struct page *page, unsigned from, unsigned to)
2364 {
2365 	struct inode *inode = page->mapping->host;
2366 	__block_commit_write(inode, page, from, to);
2367 	return 0;
2368 }
2369 
2370 /*
2371  * block_page_mkwrite() is not allowed to change the file size as it gets
2372  * called from a page fault handler when a page is first dirtied. Hence we must
2373  * be careful to check for EOF conditions here. We set the page up correctly
2374  * for a written page which means we get ENOSPC checking when writing into
2375  * holes and correct delalloc and unwritten extent mapping on filesystems that
2376  * support these features.
2377  *
2378  * We are not allowed to take the i_mutex here so we have to play games to
2379  * protect against truncate races as the page could now be beyond EOF.  Because
2380  * vmtruncate() writes the inode size before removing pages, once we have the
2381  * page lock we can determine safely if the page is beyond EOF. If it is not
2382  * beyond EOF, then the page is guaranteed safe against truncation until we
2383  * unlock the page.
2384  */
2385 int
2386 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2387 		   get_block_t get_block)
2388 {
2389 	struct page *page = vmf->page;
2390 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2391 	unsigned long end;
2392 	loff_t size;
2393 	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2394 
2395 	lock_page(page);
2396 	size = i_size_read(inode);
2397 	if ((page->mapping != inode->i_mapping) ||
2398 	    (page_offset(page) > size)) {
2399 		/* page got truncated out from underneath us */
2400 		unlock_page(page);
2401 		goto out;
2402 	}
2403 
2404 	/* page is wholly or partially inside EOF */
2405 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2406 		end = size & ~PAGE_CACHE_MASK;
2407 	else
2408 		end = PAGE_CACHE_SIZE;
2409 
2410 	ret = block_prepare_write(page, 0, end, get_block);
2411 	if (!ret)
2412 		ret = block_commit_write(page, 0, end);
2413 
2414 	if (unlikely(ret)) {
2415 		unlock_page(page);
2416 		if (ret == -ENOMEM)
2417 			ret = VM_FAULT_OOM;
2418 		else /* -ENOSPC, -EIO, etc */
2419 			ret = VM_FAULT_SIGBUS;
2420 	} else
2421 		ret = VM_FAULT_LOCKED;
2422 
2423 out:
2424 	return ret;
2425 }
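
/*
 * Illustrative sketch: hooking the helper above into a file's mmap
 * operations.  myfs_get_block and the vm_ops table are hypothetical.
 */
#if 0
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif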
2426 
2427 /*
2428  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2429  * immediately, while under the page lock.  So it needs a special end_io
2430  * handler which does not touch the bh after unlocking it.
2431  */
2432 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2433 {
2434 	__end_buffer_read_notouch(bh, uptodate);
2435 }
2436 
2437 /*
2438  * Attach the singly-linked list of buffers created by nobh_write_begin to
2439  * the page (converting it to a circular linked list and taking care of page
2440  * dirty races).
2441  */
2442 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2443 {
2444 	struct buffer_head *bh;
2445 
2446 	BUG_ON(!PageLocked(page));
2447 
2448 	spin_lock(&page->mapping->private_lock);
2449 	bh = head;
2450 	do {
2451 		if (PageDirty(page))
2452 			set_buffer_dirty(bh);
2453 		if (!bh->b_this_page)
2454 			bh->b_this_page = head;
2455 		bh = bh->b_this_page;
2456 	} while (bh != head);
2457 	attach_page_buffers(page, head);
2458 	spin_unlock(&page->mapping->private_lock);
2459 }
2460 
2461 /*
2462  * On entry, the page is not uptodate anywhere.
2463  * On exit, the page is fully uptodate in the areas outside (from,to).
2464  */
2465 int nobh_write_begin(struct file *file, struct address_space *mapping,
2466 			loff_t pos, unsigned len, unsigned flags,
2467 			struct page **pagep, void **fsdata,
2468 			get_block_t *get_block)
2469 {
2470 	struct inode *inode = mapping->host;
2471 	const unsigned blkbits = inode->i_blkbits;
2472 	const unsigned blocksize = 1 << blkbits;
2473 	struct buffer_head *head, *bh;
2474 	struct page *page;
2475 	pgoff_t index;
2476 	unsigned from, to;
2477 	unsigned block_in_page;
2478 	unsigned block_start, block_end;
2479 	sector_t block_in_file;
2480 	int nr_reads = 0;
2481 	int ret = 0;
2482 	int is_mapped_to_disk = 1;
2483 
2484 	index = pos >> PAGE_CACHE_SHIFT;
2485 	from = pos & (PAGE_CACHE_SIZE - 1);
2486 	to = from + len;
2487 
2488 	page = grab_cache_page_write_begin(mapping, index, flags);
2489 	if (!page)
2490 		return -ENOMEM;
2491 	*pagep = page;
2492 	*fsdata = NULL;
2493 
2494 	if (page_has_buffers(page)) {
2495 		unlock_page(page);
2496 		page_cache_release(page);
2497 		*pagep = NULL;
2498 		return block_write_begin(file, mapping, pos, len, flags, pagep,
2499 					fsdata, get_block);
2500 	}
2501 
2502 	if (PageMappedToDisk(page))
2503 		return 0;
2504 
2505 	/*
2506 	 * Allocate buffers so that we can keep track of state, and potentially
2507 	 * attach them to the page if an error occurs. In the common case of
2508 	 * no error, they will just be freed again without ever being attached
2509 	 * to the page (which is all OK, because we're under the page lock).
2510 	 *
2511 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2512 	 * than the circular one we're used to.
2513 	 */
2514 	head = alloc_page_buffers(page, blocksize, 0);
2515 	if (!head) {
2516 		ret = -ENOMEM;
2517 		goto out_release;
2518 	}
2519 
2520 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2521 
2522 	/*
2523 	 * We loop across all blocks in the page, whether or not they are
2524 	 * part of the affected region.  This is so we can discover if the
2525 	 * page is fully mapped-to-disk.
2526 	 */
2527 	for (block_start = 0, block_in_page = 0, bh = head;
2528 		  block_start < PAGE_CACHE_SIZE;
2529 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2530 		int create;
2531 
2532 		block_end = block_start + blocksize;
2533 		bh->b_state = 0;
2534 		create = 1;
2535 		if (block_start >= to)
2536 			create = 0;
2537 		ret = get_block(inode, block_in_file + block_in_page,
2538 					bh, create);
2539 		if (ret)
2540 			goto failed;
2541 		if (!buffer_mapped(bh))
2542 			is_mapped_to_disk = 0;
2543 		if (buffer_new(bh))
2544 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2545 		if (PageUptodate(page)) {
2546 			set_buffer_uptodate(bh);
2547 			continue;
2548 		}
2549 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2550 			zero_user_segments(page, block_start, from,
2551 							to, block_end);
2552 			continue;
2553 		}
2554 		if (buffer_uptodate(bh))
2555 			continue;	/* reiserfs does this */
2556 		if (block_start < from || block_end > to) {
2557 			lock_buffer(bh);
2558 			bh->b_end_io = end_buffer_read_nobh;
2559 			submit_bh(READ, bh);
2560 			nr_reads++;
2561 		}
2562 	}
2563 
2564 	if (nr_reads) {
2565 		/*
2566 		 * The page is locked, so these buffers are protected from
2567 		 * any VM or truncate activity.  Hence we don't need to care
2568 		 * for the buffer_head refcounts.
2569 		 */
2570 		for (bh = head; bh; bh = bh->b_this_page) {
2571 			wait_on_buffer(bh);
2572 			if (!buffer_uptodate(bh))
2573 				ret = -EIO;
2574 		}
2575 		if (ret)
2576 			goto failed;
2577 	}
2578 
2579 	if (is_mapped_to_disk)
2580 		SetPageMappedToDisk(page);
2581 
2582 	*fsdata = head; /* to be released by nobh_write_end */
2583 
2584 	return 0;
2585 
2586 failed:
2587 	BUG_ON(!ret);
2588 	/*
2589 	 * Error recovery is a bit difficult. We need to zero out blocks that
2590 	 * were newly allocated, and dirty them to ensure they get written out.
2591 	 * Buffers need to be attached to the page at this point, otherwise
2592 	 * the handling of potential IO errors during writeout would be hard
2593 	 * (could try doing synchronous writeout, but what if that fails too?)
2594 	 */
2595 	attach_nobh_buffers(page, head);
2596 	page_zero_new_buffers(page, from, to);
2597 
2598 out_release:
2599 	unlock_page(page);
2600 	page_cache_release(page);
2601 	*pagep = NULL;
2602 
2603 	if (pos + len > inode->i_size)
2604 		vmtruncate(inode, inode->i_size);
2605 
2606 	return ret;
2607 }
2608 EXPORT_SYMBOL(nobh_write_begin);
2609 
2610 int nobh_write_end(struct file *file, struct address_space *mapping,
2611 			loff_t pos, unsigned len, unsigned copied,
2612 			struct page *page, void *fsdata)
2613 {
2614 	struct inode *inode = page->mapping->host;
2615 	struct buffer_head *head = fsdata;
2616 	struct buffer_head *bh;
2617 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2618 
2619 	if (unlikely(copied < len) && head)
2620 		attach_nobh_buffers(page, head);
2621 	if (page_has_buffers(page))
2622 		return generic_write_end(file, mapping, pos, len,
2623 					copied, page, fsdata);
2624 
2625 	SetPageUptodate(page);
2626 	set_page_dirty(page);
2627 	if (pos+copied > inode->i_size) {
2628 		i_size_write(inode, pos+copied);
2629 		mark_inode_dirty(inode);
2630 	}
2631 
2632 	unlock_page(page);
2633 	page_cache_release(page);
2634 
2635 	while (head) {
2636 		bh = head;
2637 		head = head->b_this_page;
2638 		free_buffer_head(bh);
2639 	}
2640 
2641 	return copied;
2642 }
2643 EXPORT_SYMBOL(nobh_write_end);
2644 
2645 /*
2646  * nobh_writepage() - based on block_write_full_page() except
2647  * that it tries to operate without attaching bufferheads to
2648  * the page.
2649  */
2650 int nobh_writepage(struct page *page, get_block_t *get_block,
2651 			struct writeback_control *wbc)
2652 {
2653 	struct inode * const inode = page->mapping->host;
2654 	loff_t i_size = i_size_read(inode);
2655 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2656 	unsigned offset;
2657 	int ret;
2658 
2659 	/* Is the page fully inside i_size? */
2660 	if (page->index < end_index)
2661 		goto out;
2662 
2663 	/* Is the page fully outside i_size? (truncate in progress) */
2664 	offset = i_size & (PAGE_CACHE_SIZE-1);
2665 	if (page->index >= end_index+1 || !offset) {
2666 		/*
2667 		 * The page may have dirty, unmapped buffers.  For example,
2668 		 * they may have been added in ext3_writepage().  Make them
2669 		 * freeable here, so the page does not leak.
2670 		 */
2671 #if 0
2672 		/* Not really sure about this - do we need this? */
2673 		if (page->mapping->a_ops->invalidatepage)
2674 			page->mapping->a_ops->invalidatepage(page, offset);
2675 #endif
2676 		unlock_page(page);
2677 		return 0; /* don't care */
2678 	}
2679 
2680 	/*
2681 	 * The page straddles i_size.  It must be zeroed out on each and every
2682 	 * writepage invocation because it may be mmapped.  "A file is mapped
2683 	 * in multiples of the page size.  For a file that is not a multiple of
2684 	 * the page size, the remaining memory is zeroed when mapped, and
2685 	 * writes to that region are not written out to the file."
2686 	 */
2687 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2688 out:
2689 	ret = mpage_writepage(page, get_block, wbc);
2690 	if (ret == -EAGAIN)
2691 		ret = __block_write_full_page(inode, page, get_block, wbc,
2692 					      end_buffer_async_write);
2693 	return ret;
2694 }
2695 EXPORT_SYMBOL(nobh_writepage);
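
/*
 * Illustrative sketch: a nobh-flavoured aops table, modelled loosely on
 * how ext2's nobh mount option wires these helpers up.  The myfs_nobh_*
 * wrappers are hypothetical and just pass the filesystem's get_block
 * through to the helpers above.
 */
#if 0
static int myfs_nobh_writepage(struct page *page,
				struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,	/* nobh_write_begin() */
	.write_end	= nobh_write_end,
};
#endif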
2696 
2697 int nobh_truncate_page(struct address_space *mapping,
2698 			loff_t from, get_block_t *get_block)
2699 {
2700 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2701 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2702 	unsigned blocksize;
2703 	sector_t iblock;
2704 	unsigned length, pos;
2705 	struct inode *inode = mapping->host;
2706 	struct page *page;
2707 	struct buffer_head map_bh;
2708 	int err;
2709 
2710 	blocksize = 1 << inode->i_blkbits;
2711 	length = offset & (blocksize - 1);
2712 
2713 	/* Block boundary? Nothing to do */
2714 	if (!length)
2715 		return 0;
2716 
2717 	length = blocksize - length;
2718 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2719 
2720 	page = grab_cache_page(mapping, index);
2721 	err = -ENOMEM;
2722 	if (!page)
2723 		goto out;
2724 
2725 	if (page_has_buffers(page)) {
2726 has_buffers:
2727 		unlock_page(page);
2728 		page_cache_release(page);
2729 		return block_truncate_page(mapping, from, get_block);
2730 	}
2731 
2732 	/* Find the buffer that contains "offset" */
2733 	pos = blocksize;
2734 	while (offset >= pos) {
2735 		iblock++;
2736 		pos += blocksize;
2737 	}
2738 
2739 	map_bh.b_size = blocksize;
2740 	map_bh.b_state = 0;
2741 	err = get_block(inode, iblock, &map_bh, 0);
2742 	if (err)
2743 		goto unlock;
2744 	/* unmapped? It's a hole - nothing to do */
2745 	if (!buffer_mapped(&map_bh))
2746 		goto unlock;
2747 
2748 	/* Ok, it's mapped. Make sure it's up-to-date */
2749 	if (!PageUptodate(page)) {
2750 		err = mapping->a_ops->readpage(NULL, page);
2751 		if (err) {
2752 			page_cache_release(page);
2753 			goto out;
2754 		}
2755 		lock_page(page);
2756 		if (!PageUptodate(page)) {
2757 			err = -EIO;
2758 			goto unlock;
2759 		}
2760 		if (page_has_buffers(page))
2761 			goto has_buffers;
2762 	}
2763 	zero_user(page, offset, length);
2764 	set_page_dirty(page);
2765 	err = 0;
2766 
2767 unlock:
2768 	unlock_page(page);
2769 	page_cache_release(page);
2770 out:
2771 	return err;
2772 }
2773 EXPORT_SYMBOL(nobh_truncate_page);
2774 
2775 int block_truncate_page(struct address_space *mapping,
2776 			loff_t from, get_block_t *get_block)
2777 {
2778 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2779 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2780 	unsigned blocksize;
2781 	sector_t iblock;
2782 	unsigned length, pos;
2783 	struct inode *inode = mapping->host;
2784 	struct page *page;
2785 	struct buffer_head *bh;
2786 	int err;
2787 
2788 	blocksize = 1 << inode->i_blkbits;
2789 	length = offset & (blocksize - 1);
2790 
2791 	/* Block boundary? Nothing to do */
2792 	if (!length)
2793 		return 0;
2794 
2795 	length = blocksize - length;
2796 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2797 
2798 	page = grab_cache_page(mapping, index);
2799 	err = -ENOMEM;
2800 	if (!page)
2801 		goto out;
2802 
2803 	if (!page_has_buffers(page))
2804 		create_empty_buffers(page, blocksize, 0);
2805 
2806 	/* Find the buffer that contains "offset" */
2807 	bh = page_buffers(page);
2808 	pos = blocksize;
2809 	while (offset >= pos) {
2810 		bh = bh->b_this_page;
2811 		iblock++;
2812 		pos += blocksize;
2813 	}
2814 
2815 	err = 0;
2816 	if (!buffer_mapped(bh)) {
2817 		WARN_ON(bh->b_size != blocksize);
2818 		err = get_block(inode, iblock, bh, 0);
2819 		if (err)
2820 			goto unlock;
2821 		/* unmapped? It's a hole - nothing to do */
2822 		if (!buffer_mapped(bh))
2823 			goto unlock;
2824 	}
2825 
2826 	/* Ok, it's mapped. Make sure it's up-to-date */
2827 	if (PageUptodate(page))
2828 		set_buffer_uptodate(bh);
2829 
2830 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2831 		err = -EIO;
2832 		ll_rw_block(READ, 1, &bh);
2833 		wait_on_buffer(bh);
2834 		/* Uhhuh. Read error. Complain and punt. */
2835 		if (!buffer_uptodate(bh))
2836 			goto unlock;
2837 	}
2838 
2839 	zero_user(page, offset, length);
2840 	mark_buffer_dirty(bh);
2841 	err = 0;
2842 
2843 unlock:
2844 	unlock_page(page);
2845 	page_cache_release(page);
2846 out:
2847 	return err;
2848 }
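
/*
 * Illustrative sketch: a filesystem's truncate path typically zeroes the
 * tail of the final partial block with the helper above before releasing
 * on-disk blocks.  myfs_truncate() and myfs_free_blocks() are assumptions.
 */
#if 0
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	myfs_free_blocks(inode);	/* release blocks past i_size */
}
#endif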
2849 
2850 /*
2851  * The generic ->writepage function for buffer-backed address_spaces
2852  * this form passes in the end_io handler used to finish the IO.
2853  */
2854 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2855 			struct writeback_control *wbc, bh_end_io_t *handler)
2856 {
2857 	struct inode * const inode = page->mapping->host;
2858 	loff_t i_size = i_size_read(inode);
2859 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2860 	unsigned offset;
2861 
2862 	/* Is the page fully inside i_size? */
2863 	if (page->index < end_index)
2864 		return __block_write_full_page(inode, page, get_block, wbc,
2865 					       handler);
2866 
2867 	/* Is the page fully outside i_size? (truncate in progress) */
2868 	offset = i_size & (PAGE_CACHE_SIZE-1);
2869 	if (page->index >= end_index+1 || !offset) {
2870 		/*
2871 		 * The page may have dirty, unmapped buffers.  For example,
2872 		 * they may have been added in ext3_writepage().  Make them
2873 		 * freeable here, so the page does not leak.
2874 		 */
2875 		do_invalidatepage(page, 0);
2876 		unlock_page(page);
2877 		return 0; /* don't care */
2878 	}
2879 
2880 	/*
2881 	 * The page straddles i_size.  It must be zeroed out on each and every
2882 	 * writepage invocation because it may be mmapped.  "A file is mapped
2883 	 * in multiples of the page size.  For a file that is not a multiple of
2884 	 * the page size, the remaining memory is zeroed when mapped, and
2885 	 * writes to that region are not written out to the file."
2886 	 */
2887 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2888 	return __block_write_full_page(inode, page, get_block, wbc, handler);
2889 }
2890 
2891 /*
2892  * The generic ->writepage function for buffer-backed address_spaces
2893  */
2894 int block_write_full_page(struct page *page, get_block_t *get_block,
2895 			struct writeback_control *wbc)
2896 {
2897 	return block_write_full_page_endio(page, get_block, wbc,
2898 					   end_buffer_async_write);
2899 }
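
/*
 * Illustrative sketch: the matching one-line ->writepage for the
 * hypothetical myfs (paired with the myfs_readpage sketch earlier).
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif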
2900 
2901 
2902 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2903 			    get_block_t *get_block)
2904 {
2905 	struct buffer_head tmp;
2906 	struct inode *inode = mapping->host;
2907 	tmp.b_state = 0;
2908 	tmp.b_blocknr = 0;
2909 	tmp.b_size = 1 << inode->i_blkbits;
2910 	get_block(inode, block, &tmp, 0);
2911 	return tmp.b_blocknr;
2912 }
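
/*
 * Illustrative sketch: the ->bmap wrapper that makes the FIBMAP ioctl work
 * for the hypothetical myfs; userspace reaches it via bmap().
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif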
2913 
2914 static void end_bio_bh_io_sync(struct bio *bio, int err)
2915 {
2916 	struct buffer_head *bh = bio->bi_private;
2917 
2918 	if (err == -EOPNOTSUPP) {
2919 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2920 		set_bit(BH_Eopnotsupp, &bh->b_state);
2921 	}
2922 
2923 	if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2924 		set_bit(BH_Quiet, &bh->b_state);
2925 
2926 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2927 	bio_put(bio);
2928 }
2929 
2930 int submit_bh(int rw, struct buffer_head * bh)
2931 {
2932 	struct bio *bio;
2933 	int ret = 0;
2934 
2935 	BUG_ON(!buffer_locked(bh));
2936 	BUG_ON(!buffer_mapped(bh));
2937 	BUG_ON(!bh->b_end_io);
2938 	BUG_ON(buffer_delay(bh));
2939 	BUG_ON(buffer_unwritten(bh));
2940 
2941 	/*
2942 	 * Mask in barrier bit for a write (could be either a WRITE or a
2943 	 * WRITE_SYNC).
2944 	 */
2945 	if (buffer_ordered(bh) && (rw & WRITE))
2946 		rw |= WRITE_BARRIER;
2947 
2948 	/*
2949 	 * Only clear out a write error when rewriting
2950 	 */
2951 	if (test_set_buffer_req(bh) && (rw & WRITE))
2952 		clear_buffer_write_io_error(bh);
2953 
2954 	/*
2955 	 * from here on down, it's all bio -- do the initial mapping,
2956 	 * submit_bio -> generic_make_request may further map this bio around
2957 	 */
2958 	bio = bio_alloc(GFP_NOIO, 1);
2959 
2960 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2961 	bio->bi_bdev = bh->b_bdev;
2962 	bio->bi_io_vec[0].bv_page = bh->b_page;
2963 	bio->bi_io_vec[0].bv_len = bh->b_size;
2964 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2965 
2966 	bio->bi_vcnt = 1;
2967 	bio->bi_idx = 0;
2968 	bio->bi_size = bh->b_size;
2969 
2970 	bio->bi_end_io = end_bio_bh_io_sync;
2971 	bio->bi_private = bh;
2972 
2973 	bio_get(bio);
2974 	submit_bio(rw, bio);
2975 
2976 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2977 		ret = -EOPNOTSUPP;
2978 
2979 	bio_put(bio);
2980 	return ret;
2981 }
2982 
2983 /**
2984  * ll_rw_block: low-level access to block devices (DEPRECATED)
2985  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2986  * @nr: number of &struct buffer_heads in the array
2987  * @bhs: array of pointers to &struct buffer_head
2988  *
2989  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2990  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2991  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2992  * are sent to disk. The fourth %READA option is described in the documentation
2993  * for generic_make_request() which ll_rw_block() calls.
2994  *
2995  * This function drops any buffer that it cannot get a lock on (with the
2996  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2997  * clean when doing a write request, and any buffer that appears to be
2998  * up-to-date when doing read request.  Further it marks as clean buffers that
2999  * are processed for writing (the buffer cache won't assume that they are
3000  * actually clean until the buffer gets unlocked).
3001  *
3002  * ll_rw_block sets b_end_io to simple completion handler that marks
3003  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
3004  * any waiters.
3005  *
3006  * All of the buffers must be for the same device, and must also be a
3007  * multiple of the current approved size for the device.
3008  */
3009 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3010 {
3011 	int i;
3012 
3013 	for (i = 0; i < nr; i++) {
3014 		struct buffer_head *bh = bhs[i];
3015 
3016 		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3017 			lock_buffer(bh);
3018 		else if (!trylock_buffer(bh))
3019 			continue;
3020 
3021 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3022 		    rw == SWRITE_SYNC_PLUG) {
3023 			if (test_clear_buffer_dirty(bh)) {
3024 				bh->b_end_io = end_buffer_write_sync;
3025 				get_bh(bh);
3026 				if (rw == SWRITE_SYNC)
3027 					submit_bh(WRITE_SYNC, bh);
3028 				else
3029 					submit_bh(WRITE, bh);
3030 				continue;
3031 			}
3032 		} else {
3033 			if (!buffer_uptodate(bh)) {
3034 				bh->b_end_io = end_buffer_read_sync;
3035 				get_bh(bh);
3036 				submit_bh(rw, bh);
3037 				continue;
3038 			}
3039 		}
3040 		unlock_buffer(bh);
3041 	}
3042 }
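
/*
 * Illustrative fragment: batching reads with ll_rw_block() and waiting
 * only for the buffer that is needed first.  bh_a and bh_b are assumed to
 * be valid, referenced buffer_heads for the same device.
 */
#if 0
	struct buffer_head *batch[2] = { bh_a, bh_b };

	ll_rw_block(READ, 2, batch);	/* start both reads */
	wait_on_buffer(bh_a);		/* block only on the first */
	if (!buffer_uptodate(bh_a))
		return -EIO;
#endif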
3043 
3044 /*
3045  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3046  * and then start new I/O and then wait upon it.  The caller must have a ref on
3047  * the buffer_head.
3048  */
3049 int sync_dirty_buffer(struct buffer_head *bh)
3050 {
3051 	int ret = 0;
3052 
3053 	WARN_ON(atomic_read(&bh->b_count) < 1);
3054 	lock_buffer(bh);
3055 	if (test_clear_buffer_dirty(bh)) {
3056 		get_bh(bh);
3057 		bh->b_end_io = end_buffer_write_sync;
3058 		ret = submit_bh(WRITE_SYNC, bh);
3059 		wait_on_buffer(bh);
3060 		if (buffer_eopnotsupp(bh)) {
3061 			clear_buffer_eopnotsupp(bh);
3062 			ret = -EOPNOTSUPP;
3063 		}
3064 		if (!ret && !buffer_uptodate(bh))
3065 			ret = -EIO;
3066 	} else {
3067 		unlock_buffer(bh);
3068 	}
3069 	return ret;
3070 }
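
/*
 * Illustrative fragment: synchronously rewriting a metadata buffer, e.g.
 * a superblock.  sbh is assumed to be a referenced buffer_head whose
 * contents were just updated in place.
 */
#if 0
	mark_buffer_dirty(sbh);
	if (sync_dirty_buffer(sbh))	/* submits WRITE_SYNC and waits */
		printk(KERN_ERR "myfs: superblock write failed\n");
#endif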
3071 
3072 /*
3073  * try_to_free_buffers() checks if all the buffers on this particular page
3074  * are unused, and releases them if so.
3075  *
3076  * Exclusion against try_to_free_buffers may be obtained by either
3077  * locking the page or by holding its mapping's private_lock.
3078  *
3079  * If the page is dirty but all the buffers are clean then we need to
3080  * be sure to mark the page clean as well.  This is because the page
3081  * may be against a block device, and a later reattachment of buffers
3082  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3083  * filesystem data on the same device.
3084  *
3085  * The same applies to regular filesystem pages: if all the buffers are
3086  * clean then we set the page clean and proceed.  To do that, we require
3087  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3088  * private_lock.
3089  *
3090  * try_to_free_buffers() is non-blocking.
3091  */
3092 static inline int buffer_busy(struct buffer_head *bh)
3093 {
3094 	return atomic_read(&bh->b_count) |
3095 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3096 }
3097 
3098 static int
3099 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3100 {
3101 	struct buffer_head *head = page_buffers(page);
3102 	struct buffer_head *bh;
3103 
3104 	bh = head;
3105 	do {
3106 		if (buffer_write_io_error(bh) && page->mapping)
3107 			set_bit(AS_EIO, &page->mapping->flags);
3108 		if (buffer_busy(bh))
3109 			goto failed;
3110 		bh = bh->b_this_page;
3111 	} while (bh != head);
3112 
3113 	do {
3114 		struct buffer_head *next = bh->b_this_page;
3115 
3116 		if (bh->b_assoc_map)
3117 			__remove_assoc_queue(bh);
3118 		bh = next;
3119 	} while (bh != head);
3120 	*buffers_to_free = head;
3121 	__clear_page_buffers(page);
3122 	return 1;
3123 failed:
3124 	return 0;
3125 }
3126 
3127 int try_to_free_buffers(struct page *page)
3128 {
3129 	struct address_space * const mapping = page->mapping;
3130 	struct buffer_head *buffers_to_free = NULL;
3131 	int ret = 0;
3132 
3133 	BUG_ON(!PageLocked(page));
3134 	if (PageWriteback(page))
3135 		return 0;
3136 
3137 	if (mapping == NULL) {		/* can this still happen? */
3138 		ret = drop_buffers(page, &buffers_to_free);
3139 		goto out;
3140 	}
3141 
3142 	spin_lock(&mapping->private_lock);
3143 	ret = drop_buffers(page, &buffers_to_free);
3144 
3145 	/*
3146 	 * If the filesystem writes its buffers by hand (eg ext3)
3147 	 * then we can have clean buffers against a dirty page.  We
3148 	 * clean the page here; otherwise the VM will never notice
3149 	 * that the filesystem did any IO at all.
3150 	 *
3151 	 * Also, during truncate, discard_buffer will have marked all
3152 	 * the page's buffers clean.  We discover that here and clean
3153 	 * the page also.
3154 	 *
3155 	 * private_lock must be held over this entire operation in order
3156 	 * to synchronise against __set_page_dirty_buffers and prevent the
3157 	 * dirty bit from being lost.
3158 	 */
3159 	if (ret)
3160 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3161 	spin_unlock(&mapping->private_lock);
3162 out:
3163 	if (buffers_to_free) {
3164 		struct buffer_head *bh = buffers_to_free;
3165 
3166 		do {
3167 			struct buffer_head *next = bh->b_this_page;
3168 			free_buffer_head(bh);
3169 			bh = next;
3170 		} while (bh != buffers_to_free);
3171 	}
3172 	return ret;
3173 }
3174 EXPORT_SYMBOL(try_to_free_buffers);
3175 
3176 void block_sync_page(struct page *page)
3177 {
3178 	struct address_space *mapping;
3179 
3180 	smp_mb();
3181 	mapping = page_mapping(page);
3182 	if (mapping)
3183 		blk_run_backing_dev(mapping->backing_dev_info, page);
3184 }
3185 
3186 /*
3187  * There are no bdflush tunables left.  But distributions are
3188  * still running obsolete flush daemons, so we terminate them here.
3189  *
3190  * Use of bdflush() is deprecated and will be removed in a future kernel.
3191  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3192  */
3193 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3194 {
3195 	static int msg_count;
3196 
3197 	if (!capable(CAP_SYS_ADMIN))
3198 		return -EPERM;
3199 
3200 	if (msg_count < 5) {
3201 		msg_count++;
3202 		printk(KERN_INFO
3203 			"warning: process `%s' used the obsolete bdflush"
3204 			" system call\n", current->comm);
3205 		printk(KERN_INFO "Fix your initscripts?\n");
3206 	}
3207 
3208 	if (func == 1)
3209 		do_exit(0);
3210 	return 0;
3211 }
3212 
3213 /*
3214  * Buffer-head allocation
3215  */
3216 static struct kmem_cache *bh_cachep;
3217 
3218 /*
3219  * Once the number of bh's in the machine exceeds this level, we start
3220  * stripping them in writeback.
3221  */
3222 static int max_buffer_heads;
3223 
3224 int buffer_heads_over_limit;
3225 
3226 struct bh_accounting {
3227 	int nr;			/* Number of live bh's */
3228 	int ratelimit;		/* Limit cacheline bouncing */
3229 };
3230 
3231 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3232 
3233 static void recalc_bh_state(void)
3234 {
3235 	int i;
3236 	int tot = 0;
3237 
3238 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3239 		return;
3240 	__get_cpu_var(bh_accounting).ratelimit = 0;
3241 	for_each_online_cpu(i)
3242 		tot += per_cpu(bh_accounting, i).nr;
3243 	buffer_heads_over_limit = (tot > max_buffer_heads);
3244 }
3245 
3246 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3247 {
3248 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3249 	if (ret) {
3250 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3251 		get_cpu_var(bh_accounting).nr++;
3252 		recalc_bh_state();
3253 		put_cpu_var(bh_accounting);
3254 	}
3255 	return ret;
3256 }
3257 EXPORT_SYMBOL(alloc_buffer_head);
3258 
3259 void free_buffer_head(struct buffer_head *bh)
3260 {
3261 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3262 	kmem_cache_free(bh_cachep, bh);
3263 	get_cpu_var(bh_accounting).nr--;
3264 	recalc_bh_state();
3265 	put_cpu_var(bh_accounting);
3266 }
3267 EXPORT_SYMBOL(free_buffer_head);
3268 
3269 static void buffer_exit_cpu(int cpu)
3270 {
3271 	int i;
3272 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3273 
3274 	for (i = 0; i < BH_LRU_SIZE; i++) {
3275 		brelse(b->bhs[i]);
3276 		b->bhs[i] = NULL;
3277 	}
3278 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3279 	per_cpu(bh_accounting, cpu).nr = 0;
3280 	put_cpu_var(bh_accounting);
3281 }
3282 
3283 static int buffer_cpu_notify(struct notifier_block *self,
3284 			      unsigned long action, void *hcpu)
3285 {
3286 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3287 		buffer_exit_cpu((unsigned long)hcpu);
3288 	return NOTIFY_OK;
3289 }
3290 
3291 /**
3292  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3293  * @bh: struct buffer_head
3294  *
3295  * Returns true if the buffer is up-to-date; otherwise returns
3296  * false with the buffer locked.
3297  */
3298 int bh_uptodate_or_lock(struct buffer_head *bh)
3299 {
3300 	if (!buffer_uptodate(bh)) {
3301 		lock_buffer(bh);
3302 		if (!buffer_uptodate(bh))
3303 			return 0;
3304 		unlock_buffer(bh);
3305 	}
3306 	return 1;
3307 }
3308 EXPORT_SYMBOL(bh_uptodate_or_lock);
3309 
3310 /**
3311  * bh_submit_read - Submit a locked buffer for reading
3312  * @bh: struct buffer_head
3313  *
3314  * Returns zero on success and -EIO on error.
3315  */
3316 int bh_submit_read(struct buffer_head *bh)
3317 {
3318 	BUG_ON(!buffer_locked(bh));
3319 
3320 	if (buffer_uptodate(bh)) {
3321 		unlock_buffer(bh);
3322 		return 0;
3323 	}
3324 
3325 	get_bh(bh);
3326 	bh->b_end_io = end_buffer_read_sync;
3327 	submit_bh(READ, bh);
3328 	wait_on_buffer(bh);
3329 	if (buffer_uptodate(bh))
3330 		return 0;
3331 	return -EIO;
3332 }
3333 EXPORT_SYMBOL(bh_submit_read);
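
/*
 * Illustrative fragment: the canonical pairing of the two helpers above.
 * The uptodate check avoids I/O entirely in the common case; otherwise
 * bh_submit_read() consumes the lock that bh_uptodate_or_lock() took.
 */
#if 0
	if (!bh_uptodate_or_lock(bh)) {
		if (bh_submit_read(bh))
			return -EIO;
	}
#endif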
3334 
3335 static void
3336 init_buffer_head(void *data)
3337 {
3338 	struct buffer_head *bh = data;
3339 
3340 	memset(bh, 0, sizeof(*bh));
3341 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3342 }
3343 
3344 void __init buffer_init(void)
3345 {
3346 	int nrpages;
3347 
3348 	bh_cachep = kmem_cache_create("buffer_head",
3349 			sizeof(struct buffer_head), 0,
3350 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3351 				SLAB_MEM_SPREAD),
3352 				init_buffer_head);
3353 
3354 	/*
3355 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3356 	 */
3357 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3358 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3359 	hotcpu_notifier(buffer_cpu_notify, 0);
3360 }
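
/*
 * Worked example of the limit above (sizes approximate): with 4KB pages
 * and a buffer_head of roughly 100 bytes, a machine with 1,000,000 free
 * buffer pages gets nrpages = 100,000 and max_buffer_heads of about
 * 100,000 * (4096 / ~100) ~= 4 million buffer heads.
 */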
3361 
3362 EXPORT_SYMBOL(__bforget);
3363 EXPORT_SYMBOL(__brelse);
3364 EXPORT_SYMBOL(__wait_on_buffer);
3365 EXPORT_SYMBOL(block_commit_write);
3366 EXPORT_SYMBOL(block_prepare_write);
3367 EXPORT_SYMBOL(block_page_mkwrite);
3368 EXPORT_SYMBOL(block_read_full_page);
3369 EXPORT_SYMBOL(block_sync_page);
3370 EXPORT_SYMBOL(block_truncate_page);
3371 EXPORT_SYMBOL(block_write_full_page);
3372 EXPORT_SYMBOL(block_write_full_page_endio);
3373 EXPORT_SYMBOL(cont_write_begin);
3374 EXPORT_SYMBOL(end_buffer_read_sync);
3375 EXPORT_SYMBOL(end_buffer_write_sync);
3376 EXPORT_SYMBOL(end_buffer_async_write);
3377 EXPORT_SYMBOL(file_fsync);
3378 EXPORT_SYMBOL(generic_block_bmap);
3379 EXPORT_SYMBOL(generic_cont_expand_simple);
3380 EXPORT_SYMBOL(init_buffer);
3381 EXPORT_SYMBOL(invalidate_bdev);
3382 EXPORT_SYMBOL(ll_rw_block);
3383 EXPORT_SYMBOL(mark_buffer_dirty);
3384 EXPORT_SYMBOL(submit_bh);
3385 EXPORT_SYMBOL(sync_dirty_buffer);
3386 EXPORT_SYMBOL(unlock_buffer);
3387