xref: /openbmc/linux/fs/buffer.c (revision e2fc4d19292ef2eb208f76976ddc3320cc5839b6)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 
56 static int sync_buffer(void *word)
57 {
58 	struct block_device *bd;
59 	struct buffer_head *bh
60 		= container_of(word, struct buffer_head, b_state);
61 
62 	smp_mb();
63 	bd = bh->b_bdev;
64 	if (bd)
65 		blk_run_address_space(bd->bd_inode->i_mapping);
66 	io_schedule();
67 	return 0;
68 }
69 
70 void __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
77 void unlock_buffer(struct buffer_head *bh)
78 {
79 	clear_bit_unlock(BH_Lock, &bh->b_state);
80 	smp_mb__after_clear_bit();
81 	wake_up_bit(&bh->b_state, BH_Lock);
82 }
83 
84 /*
85  * Block until a buffer comes unlocked.  This doesn't stop it
86  * from becoming locked again - you have to lock it yourself
87  * if you want to preserve its state.
88  */
89 void __wait_on_buffer(struct buffer_head * bh)
90 {
91 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92 }
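/*
 * Editor's note - illustrative sketch, not part of the original file:
 * wait_on_buffer() only waits for the current holder to drop the lock;
 * the buffer may be re-locked immediately afterwards.  A caller that
 * needs the buffer stable while it examines the data must take the
 * lock itself ('copy' below is a hypothetical destination):
 */
#if 0	/* example usage */
	lock_buffer(bh);		/* sleeps until we own BH_Lock */
	if (buffer_uptodate(bh))
		memcpy(copy, bh->b_data, bh->b_size);
	unlock_buffer(bh);

	wait_on_buffer(bh);		/* by contrast: no lock held on return */
#endif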
93 
94 static void
95 __clear_page_buffers(struct page *page)
96 {
97 	ClearPagePrivate(page);
98 	set_page_private(page, 0);
99 	page_cache_release(page);
100 }
101 
102 
103 static int quiet_error(struct buffer_head *bh)
104 {
105 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
106 		return 0;
107 	return 1;
108 }
109 
110 
111 static void buffer_io_error(struct buffer_head *bh)
112 {
113 	char b[BDEVNAME_SIZE];
114 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115 			bdevname(bh->b_bdev, b),
116 			(unsigned long long)bh->b_blocknr);
117 }
118 
119 /*
120  * End-of-IO handler helper function which does not touch the bh after
121  * unlocking it.
122  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
123  * a race there is benign: unlock_buffer() only uses the bh's address for
124  * hashing after unlocking the buffer, so it doesn't actually touch the bh
125  * itself.
126  */
127 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
128 {
129 	if (uptodate) {
130 		set_buffer_uptodate(bh);
131 	} else {
132 		/* This happens, due to failed READA attempts. */
133 		clear_buffer_uptodate(bh);
134 	}
135 	unlock_buffer(bh);
136 }
137 
138 /*
139  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
140  * unlock the buffer. This is what ll_rw_block uses too.
141  */
142 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
143 {
144 	__end_buffer_read_notouch(bh, uptodate);
145 	put_bh(bh);
146 }
147 
148 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149 {
150 	char b[BDEVNAME_SIZE];
151 
152 	if (uptodate) {
153 		set_buffer_uptodate(bh);
154 	} else {
155 		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
156 			buffer_io_error(bh);
157 			printk(KERN_WARNING "lost page write due to "
158 					"I/O error on %s\n",
159 				       bdevname(bh->b_bdev, b));
160 		}
161 		set_buffer_write_io_error(bh);
162 		clear_buffer_uptodate(bh);
163 	}
164 	unlock_buffer(bh);
165 	put_bh(bh);
166 }
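/*
 * Editor's note - illustrative sketch, not from the original file: the
 * usual way to write a single dirty buffer synchronously with this
 * completion handler (essentially the pattern used by
 * sync_dirty_buffer(); 'err' is a hypothetical local in the caller):
 */
#if 0	/* example usage */
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);			/* reference for the I/O */
		bh->b_end_io = end_buffer_write_sync;
		submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
	} else {
		unlock_buffer(bh);
	}
#endif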
167 
168 /*
169  * Write out and wait upon all the dirty data associated with a block
170  * device via its mapping.  Does not take the superblock lock.
171  */
172 int sync_blockdev(struct block_device *bdev)
173 {
174 	int ret = 0;
175 
176 	if (bdev)
177 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
178 	return ret;
179 }
180 EXPORT_SYMBOL(sync_blockdev);
181 
182 /*
183  * Write out and wait upon all dirty data associated with this
184  * device.   Filesystem data as well as the underlying block
185  * device.  Takes the superblock lock.
186  */
187 int fsync_bdev(struct block_device *bdev)
188 {
189 	struct super_block *sb = get_super(bdev);
190 	if (sb) {
191 		int res = fsync_super(sb);
192 		drop_super(sb);
193 		return res;
194 	}
195 	return sync_blockdev(bdev);
196 }
197 
198 /**
199  * freeze_bdev  --  lock a filesystem and force it into a consistent state
200  * @bdev:	blockdevice to lock
201  *
202  * This takes the block device bd_mount_sem to make sure no new mounts
203  * happen on bdev until thaw_bdev() is called.
204  * If a superblock is found on this device, we take the s_umount semaphore
205  * on it to make sure nobody unmounts until the snapshot creation is done.
206  * The reference counter (bd_fsfreeze_count) guarantees that only the last
207  * unfreeze process actually unfreezes the frozen filesystem when multiple
208  * freeze requests arrive simultaneously. It is incremented in freeze_bdev()
209  * and decremented in thaw_bdev(). When it reaches 0, thaw_bdev() actually
210  * unfreezes the filesystem.
211  */
212 struct super_block *freeze_bdev(struct block_device *bdev)
213 {
214 	struct super_block *sb;
215 	int error = 0;
216 
217 	mutex_lock(&bdev->bd_fsfreeze_mutex);
218 	if (bdev->bd_fsfreeze_count > 0) {
219 		bdev->bd_fsfreeze_count++;
220 		sb = get_super(bdev);
221 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
222 		return sb;
223 	}
224 	bdev->bd_fsfreeze_count++;
225 
226 	down(&bdev->bd_mount_sem);
227 	sb = get_super(bdev);
228 	if (sb && !(sb->s_flags & MS_RDONLY)) {
229 		sb->s_frozen = SB_FREEZE_WRITE;
230 		smp_wmb();
231 
232 		__fsync_super(sb);
233 
234 		sb->s_frozen = SB_FREEZE_TRANS;
235 		smp_wmb();
236 
237 		sync_blockdev(sb->s_bdev);
238 
239 		if (sb->s_op->freeze_fs) {
240 			error = sb->s_op->freeze_fs(sb);
241 			if (error) {
242 				printk(KERN_ERR
243 					"VFS:Filesystem freeze failed\n");
244 				sb->s_frozen = SB_UNFROZEN;
245 				drop_super(sb);
246 				up(&bdev->bd_mount_sem);
247 				bdev->bd_fsfreeze_count--;
248 				mutex_unlock(&bdev->bd_fsfreeze_mutex);
249 				return ERR_PTR(error);
250 			}
251 		}
252 	}
253 
254 	sync_blockdev(bdev);
255 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
256 
257 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
258 }
259 EXPORT_SYMBOL(freeze_bdev);
260 
261 /**
262  * thaw_bdev  -- unlock filesystem
263  * @bdev:	blockdevice to unlock
264  * @sb:		associated superblock
265  *
266  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
267  */
268 int thaw_bdev(struct block_device *bdev, struct super_block *sb)
269 {
270 	int error = 0;
271 
272 	mutex_lock(&bdev->bd_fsfreeze_mutex);
273 	if (!bdev->bd_fsfreeze_count) {
274 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
275 		return -EINVAL;
276 	}
277 
278 	bdev->bd_fsfreeze_count--;
279 	if (bdev->bd_fsfreeze_count > 0) {
280 		if (sb)
281 			drop_super(sb);
282 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
283 		return 0;
284 	}
285 
286 	if (sb) {
287 		BUG_ON(sb->s_bdev != bdev);
288 		if (!(sb->s_flags & MS_RDONLY)) {
289 			if (sb->s_op->unfreeze_fs) {
290 				error = sb->s_op->unfreeze_fs(sb);
291 				if (error) {
292 					printk(KERN_ERR
293 						"VFS:Filesystem thaw failed\n");
294 					sb->s_frozen = SB_FREEZE_TRANS;
295 					bdev->bd_fsfreeze_count++;
296 					mutex_unlock(&bdev->bd_fsfreeze_mutex);
297 					return error;
298 				}
299 			}
300 			sb->s_frozen = SB_UNFROZEN;
301 			smp_wmb();
302 			wake_up(&sb->s_wait_unfrozen);
303 		}
304 		drop_super(sb);
305 	}
306 
307 	up(&bdev->bd_mount_sem);
308 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
309 	return 0;
310 }
311 EXPORT_SYMBOL(thaw_bdev);
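/*
 * Editor's note - illustrative sketch, not from the original file: a
 * snapshot-style caller brackets its work with these two calls.
 * freeze_bdev() may return NULL (no superblock found) or an ERR_PTR()
 * on failure; thaw_bdev() accepts either:
 */
#if 0	/* example usage */
	struct super_block *sb;

	sb = freeze_bdev(bdev);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	/* ... take the snapshot; fs writes are blocked here ... */
	thaw_bdev(bdev, sb);
#endif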
312 
313 /*
314  * Various filesystems appear to want __find_get_block to be non-blocking.
315  * But it's the page lock which protects the buffers.  To get around this,
316  * we get exclusion from try_to_free_buffers with the blockdev mapping's
317  * private_lock.
318  *
319  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
320  * may be quite high.  This code could TryLock the page, and if that
321  * succeeds, there is no need to take private_lock. (But if
322  * private_lock is contended then so is mapping->tree_lock).
323  */
324 static struct buffer_head *
325 __find_get_block_slow(struct block_device *bdev, sector_t block)
326 {
327 	struct inode *bd_inode = bdev->bd_inode;
328 	struct address_space *bd_mapping = bd_inode->i_mapping;
329 	struct buffer_head *ret = NULL;
330 	pgoff_t index;
331 	struct buffer_head *bh;
332 	struct buffer_head *head;
333 	struct page *page;
334 	int all_mapped = 1;
335 
336 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
337 	page = find_get_page(bd_mapping, index);
338 	if (!page)
339 		goto out;
340 
341 	spin_lock(&bd_mapping->private_lock);
342 	if (!page_has_buffers(page))
343 		goto out_unlock;
344 	head = page_buffers(page);
345 	bh = head;
346 	do {
347 		if (bh->b_blocknr == block) {
348 			ret = bh;
349 			get_bh(bh);
350 			goto out_unlock;
351 		}
352 		if (!buffer_mapped(bh))
353 			all_mapped = 0;
354 		bh = bh->b_this_page;
355 	} while (bh != head);
356 
357 	/* we might be here because some of the buffers on this page are
358 	 * not mapped.  This is due to various races between
359 	 * file io on the block device and getblk.  It gets dealt with
360 	 * elsewhere, don't buffer_error if we had some unmapped buffers
361 	 */
362 	if (all_mapped) {
363 		printk("__find_get_block_slow() failed. "
364 			"block=%llu, b_blocknr=%llu\n",
365 			(unsigned long long)block,
366 			(unsigned long long)bh->b_blocknr);
367 		printk("b_state=0x%08lx, b_size=%zu\n",
368 			bh->b_state, bh->b_size);
369 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
370 	}
371 out_unlock:
372 	spin_unlock(&bd_mapping->private_lock);
373 	page_cache_release(page);
374 out:
375 	return ret;
376 }
377 
378 /* If invalidate_buffers() will trash dirty buffers, it means some kind
379    of fs corruption is going on. Trashing dirty data always implies losing
380    information that was supposed to be just stored on the physical layer
381    by the user.
382 
383    Thus invalidate_buffers in general usage is not allowed to trash
384    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
385    be preserved.  These buffers are simply skipped.
386 
387    We also skip buffers which are still in use.  For example this can
388    happen if a userspace program is reading the block device.
389 
390    NOTE: In the case where the user removes a removable-media disk even though
391    there is still dirty data not synced to disk (due to a bug in the device
392    driver or to a user error), not destroying the dirty buffers could also
393    corrupt the next medium inserted, so a parameter is necessary to handle
394    this case in the safest way possible (trying not to corrupt the newly
395    inserted disk with data belonging to the old, now-corrupted one). Also,
396    for the ramdisk, the natural way to release the ramdisk memory is to
397    destroy its dirty buffers.
398 
399    These are two special cases. Normal usage implies that the device driver
400    issues a sync on the device (without waiting for I/O completion) and
401    then an invalidate_buffers call that doesn't trash dirty buffers.
402 
403    For handling cache coherency with the blkdev pagecache the 'update' case
404    has been introduced. It is needed to re-read any pinned buffer from
405    disk. NOTE: re-reading from disk is destructive so we can do it only
406    when we assume nobody is changing the buffercache under our I/O and when
407    we think the disk contains more recent information than the buffercache.
408    The update == 1 pass marks the buffers we need to update, the update == 2
409    pass does the actual I/O. */
410 void invalidate_bdev(struct block_device *bdev)
411 {
412 	struct address_space *mapping = bdev->bd_inode->i_mapping;
413 
414 	if (mapping->nrpages == 0)
415 		return;
416 
417 	invalidate_bh_lrus();
418 	invalidate_mapping_pages(mapping, 0, -1);
419 }
420 
421 /*
422  * Kick pdflush then try to free up some ZONE_NORMAL memory.
423  */
424 static void free_more_memory(void)
425 {
426 	struct zone *zone;
427 	int nid;
428 
429 	wakeup_pdflush(1024);
430 	yield();
431 
432 	for_each_online_node(nid) {
433 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
434 						gfp_zone(GFP_NOFS), NULL,
435 						&zone);
436 		if (zone)
437 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
438 						GFP_NOFS);
439 	}
440 }
441 
442 /*
443  * I/O completion handler for block_read_full_page() - pages
444  * which come unlocked at the end of I/O.
445  */
446 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
447 {
448 	unsigned long flags;
449 	struct buffer_head *first;
450 	struct buffer_head *tmp;
451 	struct page *page;
452 	int page_uptodate = 1;
453 
454 	BUG_ON(!buffer_async_read(bh));
455 
456 	page = bh->b_page;
457 	if (uptodate) {
458 		set_buffer_uptodate(bh);
459 	} else {
460 		clear_buffer_uptodate(bh);
461 		if (!quiet_error(bh))
462 			buffer_io_error(bh);
463 		SetPageError(page);
464 	}
465 
466 	/*
467 	 * Be _very_ careful from here on. Bad things can happen if
468 	 * two buffer heads end IO at almost the same time and both
469 	 * decide that the page is now completely done.
470 	 */
471 	first = page_buffers(page);
472 	local_irq_save(flags);
473 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
474 	clear_buffer_async_read(bh);
475 	unlock_buffer(bh);
476 	tmp = bh;
477 	do {
478 		if (!buffer_uptodate(tmp))
479 			page_uptodate = 0;
480 		if (buffer_async_read(tmp)) {
481 			BUG_ON(!buffer_locked(tmp));
482 			goto still_busy;
483 		}
484 		tmp = tmp->b_this_page;
485 	} while (tmp != bh);
486 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
487 	local_irq_restore(flags);
488 
489 	/*
490 	 * If none of the buffers had errors and they are all
491 	 * uptodate then we can set the page uptodate.
492 	 */
493 	if (page_uptodate && !PageError(page))
494 		SetPageUptodate(page);
495 	unlock_page(page);
496 	return;
497 
498 still_busy:
499 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
500 	local_irq_restore(flags);
501 	return;
502 }
503 
504 /*
505  * Completion handler for block_write_full_page() - pages which are unlocked
506  * during I/O, and which have PageWriteback cleared upon I/O completion.
507  */
508 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
509 {
510 	char b[BDEVNAME_SIZE];
511 	unsigned long flags;
512 	struct buffer_head *first;
513 	struct buffer_head *tmp;
514 	struct page *page;
515 
516 	BUG_ON(!buffer_async_write(bh));
517 
518 	page = bh->b_page;
519 	if (uptodate) {
520 		set_buffer_uptodate(bh);
521 	} else {
522 		if (!quiet_error(bh)) {
523 			buffer_io_error(bh);
524 			printk(KERN_WARNING "lost page write due to "
525 					"I/O error on %s\n",
526 			       bdevname(bh->b_bdev, b));
527 		}
528 		set_bit(AS_EIO, &page->mapping->flags);
529 		set_buffer_write_io_error(bh);
530 		clear_buffer_uptodate(bh);
531 		SetPageError(page);
532 	}
533 
534 	first = page_buffers(page);
535 	local_irq_save(flags);
536 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
537 
538 	clear_buffer_async_write(bh);
539 	unlock_buffer(bh);
540 	tmp = bh->b_this_page;
541 	while (tmp != bh) {
542 		if (buffer_async_write(tmp)) {
543 			BUG_ON(!buffer_locked(tmp));
544 			goto still_busy;
545 		}
546 		tmp = tmp->b_this_page;
547 	}
548 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
549 	local_irq_restore(flags);
550 	end_page_writeback(page);
551 	return;
552 
553 still_busy:
554 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
555 	local_irq_restore(flags);
556 	return;
557 }
558 
559 /*
560  * If a page's buffers are under async read (end_buffer_async_read
561  * completion) then there is a possibility that another thread of
562  * control could lock one of the buffers after it has completed
563  * but while some of the other buffers have not completed.  This
564  * locked buffer would confuse end_buffer_async_read() into not unlocking
565  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
566  * that this buffer is not under async I/O.
567  *
568  * The page comes unlocked when it has no locked buffer_async buffers
569  * left.
570  *
571  * PageLocked prevents anyone from starting new async I/O reads against
572  * any of the buffers.
573  *
574  * PageWriteback is used to prevent simultaneous writeout of the same
575  * page.
576  *
577  * PageLocked prevents anyone from starting writeback of a page which is
578  * under read I/O (PageWriteback is only ever set against a locked page).
579  */
580 static void mark_buffer_async_read(struct buffer_head *bh)
581 {
582 	bh->b_end_io = end_buffer_async_read;
583 	set_buffer_async_read(bh);
584 }
585 
586 void mark_buffer_async_write(struct buffer_head *bh)
587 {
588 	bh->b_end_io = end_buffer_async_write;
589 	set_buffer_async_write(bh);
590 }
591 EXPORT_SYMBOL(mark_buffer_async_write);
592 
593 
594 /*
595  * fs/buffer.c contains helper functions for buffer-backed address space's
596  * fsync functions.  A common requirement for buffer-based filesystems is
597  * that certain data from the backing blockdev needs to be written out for
598  * a successful fsync().  For example, ext2 indirect blocks need to be
599  * written back and waited upon before fsync() returns.
600  *
601  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
602  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
603  * management of a list of dependent buffers at ->i_mapping->private_list.
604  *
605  * Locking is a little subtle: try_to_free_buffers() will remove buffers
606  * from their controlling inode's queue when they are being freed.  But
607  * try_to_free_buffers() will be operating against the *blockdev* mapping
608  * at the time, not against the S_ISREG file which depends on those buffers.
609  * So the locking for private_list is via the private_lock in the address_space
610  * which backs the buffers.  Which is different from the address_space
611  * against which the buffers are listed.  So for a particular address_space,
612  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
613  * mapping->private_list will always be protected by the backing blockdev's
614  * ->private_lock.
615  *
616  * Which introduces a requirement: all buffers on an address_space's
617  * ->private_list must be from the same address_space: the blockdev's.
618  *
619  * address_spaces which do not place buffers at ->private_list via these
620  * utility functions are free to use private_lock and private_list for
621  * whatever they want.  The only requirement is that list_empty(private_list)
622  * be true at clear_inode() time.
623  *
624  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
625  * filesystems should do that.  invalidate_inode_buffers() should just go
626  * BUG_ON(!list_empty).
627  *
628  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
629  * take an address_space, not an inode.  And it should be called
630  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
631  * queued up.
632  *
633  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
634  * list if it is already on a list.  Because if the buffer is on a list,
635  * it *must* already be on the right one.  If not, the filesystem is being
636  * silly.  This will save a ton of locking.  But first we have to ensure
637  * that buffers are taken *off* the old inode's list when they are freed
638  * (presumably in truncate).  That requires careful auditing of all
639  * filesystems (do it inside bforget()).  It could also be done by bringing
640  * b_inode back.
641  */
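/*
 * Editor's note - illustrative sketch, not from the original file: the
 * typical flow described above.  When a filesystem modifies a metadata
 * block (say an ext2 indirect block) it queues the buffer on the
 * S_ISREG inode's ->private_list, and its ->fsync() later writes and
 * waits on that list ('bh', 'inode' and 'err' are hypothetical here):
 */
#if 0	/* example usage */
	/* while modifying the indirect block: */
	mark_buffer_dirty_inode(bh, inode);

	/* later, in the filesystem's ->fsync(): */
	err = sync_mapping_buffers(inode->i_mapping);
#endif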
642 
643 /*
644  * The buffer's backing address_space's private_lock must be held
645  */
646 static void __remove_assoc_queue(struct buffer_head *bh)
647 {
648 	list_del_init(&bh->b_assoc_buffers);
649 	WARN_ON(!bh->b_assoc_map);
650 	if (buffer_write_io_error(bh))
651 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
652 	bh->b_assoc_map = NULL;
653 }
654 
655 int inode_has_buffers(struct inode *inode)
656 {
657 	return !list_empty(&inode->i_data.private_list);
658 }
659 
660 /*
661  * osync is designed to support O_SYNC io.  It waits synchronously for
662  * all already-submitted IO to complete, but does not queue any new
663  * writes to the disk.
664  *
665  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
666  * you dirty the buffers, and then use osync_inode_buffers to wait for
667  * completion.  Any other dirty buffers which are not yet queued for
668  * write will not be flushed to disk by the osync.
669  */
670 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
671 {
672 	struct buffer_head *bh;
673 	struct list_head *p;
674 	int err = 0;
675 
676 	spin_lock(lock);
677 repeat:
678 	list_for_each_prev(p, list) {
679 		bh = BH_ENTRY(p);
680 		if (buffer_locked(bh)) {
681 			get_bh(bh);
682 			spin_unlock(lock);
683 			wait_on_buffer(bh);
684 			if (!buffer_uptodate(bh))
685 				err = -EIO;
686 			brelse(bh);
687 			spin_lock(lock);
688 			goto repeat;
689 		}
690 	}
691 	spin_unlock(lock);
692 	return err;
693 }
694 
695 /**
696  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
697  * @mapping: the mapping which wants those buffers written
698  *
699  * Starts I/O against the buffers at mapping->private_list, and waits upon
700  * that I/O.
701  *
702  * Basically, this is a convenience function for fsync().
703  * @mapping is a file or directory which needs those buffers to be written for
704  * a successful fsync().
705  */
706 int sync_mapping_buffers(struct address_space *mapping)
707 {
708 	struct address_space *buffer_mapping = mapping->assoc_mapping;
709 
710 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
711 		return 0;
712 
713 	return fsync_buffers_list(&buffer_mapping->private_lock,
714 					&mapping->private_list);
715 }
716 EXPORT_SYMBOL(sync_mapping_buffers);
717 
718 /*
719  * Called when we've recently written block `bblock', and it is known that
720  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
721  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
722  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
723  */
724 void write_boundary_block(struct block_device *bdev,
725 			sector_t bblock, unsigned blocksize)
726 {
727 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
728 	if (bh) {
729 		if (buffer_dirty(bh))
730 			ll_rw_block(WRITE, 1, &bh);
731 		put_bh(bh);
732 	}
733 }
734 
735 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
736 {
737 	struct address_space *mapping = inode->i_mapping;
738 	struct address_space *buffer_mapping = bh->b_page->mapping;
739 
740 	mark_buffer_dirty(bh);
741 	if (!mapping->assoc_mapping) {
742 		mapping->assoc_mapping = buffer_mapping;
743 	} else {
744 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
745 	}
746 	if (!bh->b_assoc_map) {
747 		spin_lock(&buffer_mapping->private_lock);
748 		list_move_tail(&bh->b_assoc_buffers,
749 				&mapping->private_list);
750 		bh->b_assoc_map = mapping;
751 		spin_unlock(&buffer_mapping->private_lock);
752 	}
753 }
754 EXPORT_SYMBOL(mark_buffer_dirty_inode);
755 
756 /*
757  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
758  * dirty.
759  *
760  * If warn is true, then emit a warning if the page is not uptodate and has
761  * not been truncated.
762  */
763 static int __set_page_dirty(struct page *page,
764 		struct address_space *mapping, int warn)
765 {
766 	if (unlikely(!mapping))
767 		return !TestSetPageDirty(page);
768 
769 	if (TestSetPageDirty(page))
770 		return 0;
771 
772 	spin_lock_irq(&mapping->tree_lock);
773 	if (page->mapping) {	/* Race with truncate? */
774 		WARN_ON_ONCE(warn && !PageUptodate(page));
775 
776 		if (mapping_cap_account_dirty(mapping)) {
777 			__inc_zone_page_state(page, NR_FILE_DIRTY);
778 			__inc_bdi_stat(mapping->backing_dev_info,
779 					BDI_RECLAIMABLE);
780 			task_dirty_inc(current);
781 			task_io_account_write(PAGE_CACHE_SIZE);
782 		}
783 		radix_tree_tag_set(&mapping->page_tree,
784 				page_index(page), PAGECACHE_TAG_DIRTY);
785 	}
786 	spin_unlock_irq(&mapping->tree_lock);
787 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
788 
789 	return 1;
790 }
791 
792 /*
793  * Add a page to the dirty page list.
794  *
795  * It is a sad fact of life that this function is called from several places
796  * deeply under spinlocking.  It may not sleep.
797  *
798  * If the page has buffers, the uptodate buffers are set dirty, to preserve
799  * dirty-state coherency between the page and the buffers.  If the page does
800  * not have buffers then when they are later attached they will all be set
801  * dirty.
802  *
803  * The buffers are dirtied before the page is dirtied.  There's a small race
804  * window in which a writepage caller may see the page cleanness but not the
805  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
806  * before the buffers, a concurrent writepage caller could clear the page dirty
807  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
808  * page on the dirty page list.
809  *
810  * We use private_lock to lock against try_to_free_buffers while using the
811  * page's buffer list.  Also use this to protect against clean buffers being
812  * added to the page after it was set dirty.
813  *
814  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
815  * address_space though.
816  */
817 int __set_page_dirty_buffers(struct page *page)
818 {
819 	struct address_space *mapping = page_mapping(page);
820 
821 	if (unlikely(!mapping))
822 		return !TestSetPageDirty(page);
823 
824 	spin_lock(&mapping->private_lock);
825 	if (page_has_buffers(page)) {
826 		struct buffer_head *head = page_buffers(page);
827 		struct buffer_head *bh = head;
828 
829 		do {
830 			set_buffer_dirty(bh);
831 			bh = bh->b_this_page;
832 		} while (bh != head);
833 	}
834 	spin_unlock(&mapping->private_lock);
835 
836 	return __set_page_dirty(page, mapping, 1);
837 }
838 EXPORT_SYMBOL(__set_page_dirty_buffers);
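/*
 * Editor's note - illustrative sketch, not from the original file:
 * __set_page_dirty_buffers() is the default way buffer-backed pages get
 * dirtied - set_page_dirty() falls back to it when an address_space
 * supplies no ->set_page_dirty method - but a filesystem can also wire
 * it up explicitly ('example_aops' is hypothetical):
 */
#if 0	/* example aops (other methods omitted) */
static const struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
	/* .readpage, .writepage, ... */
};
#endif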
839 
840 /*
841  * Write out and wait upon a list of buffers.
842  *
843  * We have conflicting pressures: we want to make sure that all
844  * initially dirty buffers get waited on, but that any subsequently
845  * dirtied buffers don't.  After all, we don't want fsync to last
846  * forever if somebody is actively writing to the file.
847  *
848  * Do this in two main stages: first we copy dirty buffers to a
849  * temporary inode list, queueing the writes as we go.  Then we clean
850  * up, waiting for those writes to complete.
851  *
852  * During this second stage, any subsequent updates to the file may end
853  * up refiling the buffer on the original inode's dirty list again, so
854  * there is a chance we will end up with a buffer queued for write but
855  * not yet completed on that list.  So, as a final cleanup we go through
856  * the osync code to catch these locked, dirty buffers without requeuing
857  * any newly dirty buffers for write.
858  */
859 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
860 {
861 	struct buffer_head *bh;
862 	struct list_head tmp;
863 	struct address_space *mapping;
864 	int err = 0, err2;
865 
866 	INIT_LIST_HEAD(&tmp);
867 
868 	spin_lock(lock);
869 	while (!list_empty(list)) {
870 		bh = BH_ENTRY(list->next);
871 		mapping = bh->b_assoc_map;
872 		__remove_assoc_queue(bh);
873 		/* Avoid race with mark_buffer_dirty_inode() which does
874 		 * a lockless check and we rely on seeing the dirty bit */
875 		smp_mb();
876 		if (buffer_dirty(bh) || buffer_locked(bh)) {
877 			list_add(&bh->b_assoc_buffers, &tmp);
878 			bh->b_assoc_map = mapping;
879 			if (buffer_dirty(bh)) {
880 				get_bh(bh);
881 				spin_unlock(lock);
882 				/*
883 				 * Ensure any pending I/O completes so that
884 				 * ll_rw_block() actually writes the current
885 				 * contents - it is a noop if I/O is still in
886 				 * flight on potentially older contents.
887 				 */
888 				ll_rw_block(SWRITE_SYNC, 1, &bh);
889 				brelse(bh);
890 				spin_lock(lock);
891 			}
892 		}
893 	}
894 
895 	while (!list_empty(&tmp)) {
896 		bh = BH_ENTRY(tmp.prev);
897 		get_bh(bh);
898 		mapping = bh->b_assoc_map;
899 		__remove_assoc_queue(bh);
900 		/* Avoid race with mark_buffer_dirty_inode() which does
901 		 * a lockless check and we rely on seeing the dirty bit */
902 		smp_mb();
903 		if (buffer_dirty(bh)) {
904 			list_add(&bh->b_assoc_buffers,
905 				 &mapping->private_list);
906 			bh->b_assoc_map = mapping;
907 		}
908 		spin_unlock(lock);
909 		wait_on_buffer(bh);
910 		if (!buffer_uptodate(bh))
911 			err = -EIO;
912 		brelse(bh);
913 		spin_lock(lock);
914 	}
915 
916 	spin_unlock(lock);
917 	err2 = osync_buffers_list(lock, list);
918 	if (err)
919 		return err;
920 	else
921 		return err2;
922 }
923 
924 /*
925  * Invalidate any and all dirty buffers on a given inode.  We are
926  * probably unmounting the fs, but that doesn't mean we have already
927  * done a sync().  Just drop the buffers from the inode list.
928  *
929  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
930  * assumes that all the buffers are against the blockdev.  Not true
931  * for reiserfs.
932  */
933 void invalidate_inode_buffers(struct inode *inode)
934 {
935 	if (inode_has_buffers(inode)) {
936 		struct address_space *mapping = &inode->i_data;
937 		struct list_head *list = &mapping->private_list;
938 		struct address_space *buffer_mapping = mapping->assoc_mapping;
939 
940 		spin_lock(&buffer_mapping->private_lock);
941 		while (!list_empty(list))
942 			__remove_assoc_queue(BH_ENTRY(list->next));
943 		spin_unlock(&buffer_mapping->private_lock);
944 	}
945 }
946 EXPORT_SYMBOL(invalidate_inode_buffers);
947 
948 /*
949  * Remove any clean buffers from the inode's buffer list.  This is called
950  * when we're trying to free the inode itself.  Those buffers can pin it.
951  *
952  * Returns true if all buffers were removed.
953  */
954 int remove_inode_buffers(struct inode *inode)
955 {
956 	int ret = 1;
957 
958 	if (inode_has_buffers(inode)) {
959 		struct address_space *mapping = &inode->i_data;
960 		struct list_head *list = &mapping->private_list;
961 		struct address_space *buffer_mapping = mapping->assoc_mapping;
962 
963 		spin_lock(&buffer_mapping->private_lock);
964 		while (!list_empty(list)) {
965 			struct buffer_head *bh = BH_ENTRY(list->next);
966 			if (buffer_dirty(bh)) {
967 				ret = 0;
968 				break;
969 			}
970 			__remove_assoc_queue(bh);
971 		}
972 		spin_unlock(&buffer_mapping->private_lock);
973 	}
974 	return ret;
975 }
976 
977 /*
978  * Create the appropriate buffers when given a page for data area and
979  * the size of each buffer.  Use the bh->b_this_page linked list to
980  * follow the buffers created.  Return NULL if unable to create more
981  * buffers.
982  *
983  * The retry flag is used to differentiate async IO (paging, swapping)
984  * which may not fail from ordinary buffer allocations.
985  */
986 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
987 		int retry)
988 {
989 	struct buffer_head *bh, *head;
990 	long offset;
991 
992 try_again:
993 	head = NULL;
994 	offset = PAGE_SIZE;
995 	while ((offset -= size) >= 0) {
996 		bh = alloc_buffer_head(GFP_NOFS);
997 		if (!bh)
998 			goto no_grow;
999 
1000 		bh->b_bdev = NULL;
1001 		bh->b_this_page = head;
1002 		bh->b_blocknr = -1;
1003 		head = bh;
1004 
1005 		bh->b_state = 0;
1006 		atomic_set(&bh->b_count, 0);
1007 		bh->b_private = NULL;
1008 		bh->b_size = size;
1009 
1010 		/* Link the buffer to its page */
1011 		set_bh_page(bh, page, offset);
1012 
1013 		init_buffer(bh, NULL, NULL);
1014 	}
1015 	return head;
1016 /*
1017  * In case anything failed, we just free everything we got.
1018  */
1019 no_grow:
1020 	if (head) {
1021 		do {
1022 			bh = head;
1023 			head = head->b_this_page;
1024 			free_buffer_head(bh);
1025 		} while (head);
1026 	}
1027 
1028 	/*
1029 	 * Return failure for non-async IO requests.  Async IO requests
1030 	 * are not allowed to fail, so we have to wait until buffer heads
1031 	 * become available.  But we don't want tasks sleeping with
1032 	 * partially complete buffers, so all were released above.
1033 	 */
1034 	if (!retry)
1035 		return NULL;
1036 
1037 	/* We're _really_ low on memory. Now we just
1038 	 * wait for old buffer heads to become free due to
1039 	 * finishing IO.  Since this is an async request and
1040 	 * the reserve list is empty, we're sure there are
1041 	 * async buffer heads in use.
1042 	 */
1043 	free_more_memory();
1044 	goto try_again;
1045 }
1046 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1047 
1048 static inline void
1049 link_dev_buffers(struct page *page, struct buffer_head *head)
1050 {
1051 	struct buffer_head *bh, *tail;
1052 
1053 	bh = head;
1054 	do {
1055 		tail = bh;
1056 		bh = bh->b_this_page;
1057 	} while (bh);
1058 	tail->b_this_page = head;
1059 	attach_page_buffers(page, head);
1060 }
1061 
1062 /*
1063  * Initialise the state of a blockdev page's buffers.
1064  */
1065 static void
1066 init_page_buffers(struct page *page, struct block_device *bdev,
1067 			sector_t block, int size)
1068 {
1069 	struct buffer_head *head = page_buffers(page);
1070 	struct buffer_head *bh = head;
1071 	int uptodate = PageUptodate(page);
1072 
1073 	do {
1074 		if (!buffer_mapped(bh)) {
1075 			init_buffer(bh, NULL, NULL);
1076 			bh->b_bdev = bdev;
1077 			bh->b_blocknr = block;
1078 			if (uptodate)
1079 				set_buffer_uptodate(bh);
1080 			set_buffer_mapped(bh);
1081 		}
1082 		block++;
1083 		bh = bh->b_this_page;
1084 	} while (bh != head);
1085 }
1086 
1087 /*
1088  * Create the page-cache page that contains the requested block.
1089  *
1090  * This is used purely for blockdev mappings.
1091  */
1092 static struct page *
1093 grow_dev_page(struct block_device *bdev, sector_t block,
1094 		pgoff_t index, int size)
1095 {
1096 	struct inode *inode = bdev->bd_inode;
1097 	struct page *page;
1098 	struct buffer_head *bh;
1099 
1100 	page = find_or_create_page(inode->i_mapping, index,
1101 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1102 	if (!page)
1103 		return NULL;
1104 
1105 	BUG_ON(!PageLocked(page));
1106 
1107 	if (page_has_buffers(page)) {
1108 		bh = page_buffers(page);
1109 		if (bh->b_size == size) {
1110 			init_page_buffers(page, bdev, block, size);
1111 			return page;
1112 		}
1113 		if (!try_to_free_buffers(page))
1114 			goto failed;
1115 	}
1116 
1117 	/*
1118 	 * Allocate some buffers for this page
1119 	 */
1120 	bh = alloc_page_buffers(page, size, 0);
1121 	if (!bh)
1122 		goto failed;
1123 
1124 	/*
1125 	 * Link the page to the buffers and initialise them.  Take the
1126 	 * lock to be atomic wrt __find_get_block(), which does not
1127 	 * run under the page lock.
1128 	 */
1129 	spin_lock(&inode->i_mapping->private_lock);
1130 	link_dev_buffers(page, bh);
1131 	init_page_buffers(page, bdev, block, size);
1132 	spin_unlock(&inode->i_mapping->private_lock);
1133 	return page;
1134 
1135 failed:
1136 	BUG();
1137 	unlock_page(page);
1138 	page_cache_release(page);
1139 	return NULL;
1140 }
1141 
1142 /*
1143  * Create buffers for the specified block device block's page.  If
1144  * that page was dirty, the buffers are set dirty also.
1145  */
1146 static int
1147 grow_buffers(struct block_device *bdev, sector_t block, int size)
1148 {
1149 	struct page *page;
1150 	pgoff_t index;
1151 	int sizebits;
1152 
1153 	sizebits = -1;
1154 	do {
1155 		sizebits++;
1156 	} while ((size << sizebits) < PAGE_SIZE);
1157 
1158 	index = block >> sizebits;
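	/*
	 * Editor's note (illustrative): with 4k pages and size == 1024,
	 * sizebits == 2, so the pagecache page at index (block >> 2)
	 * holds blocks (index << 2) through (index << 2) + 3.
	 */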
1159 
1160 	/*
1161 	 * Check for a block which wants to lie outside our maximum possible
1162 	 * pagecache index.  (this comparison is done using sector_t types).
1163 	 */
1164 	if (unlikely(index != block >> sizebits)) {
1165 		char b[BDEVNAME_SIZE];
1166 
1167 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1168 			"device %s\n",
1169 			__func__, (unsigned long long)block,
1170 			bdevname(bdev, b));
1171 		return -EIO;
1172 	}
1173 	block = index << sizebits;
1174 	/* Create a page with the proper size buffers.. */
1175 	page = grow_dev_page(bdev, block, index, size);
1176 	if (!page)
1177 		return 0;
1178 	unlock_page(page);
1179 	page_cache_release(page);
1180 	return 1;
1181 }
1182 
1183 static struct buffer_head *
1184 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1185 {
1186 	/* Size must be a multiple of the hard sector size */
1187 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1188 			(size < 512 || size > PAGE_SIZE))) {
1189 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1190 					size);
1191 		printk(KERN_ERR "hardsect size: %d\n",
1192 					bdev_hardsect_size(bdev));
1193 
1194 		dump_stack();
1195 		return NULL;
1196 	}
1197 
1198 	for (;;) {
1199 		struct buffer_head * bh;
1200 		int ret;
1201 
1202 		bh = __find_get_block(bdev, block, size);
1203 		if (bh)
1204 			return bh;
1205 
1206 		ret = grow_buffers(bdev, block, size);
1207 		if (ret < 0)
1208 			return NULL;
1209 		if (ret == 0)
1210 			free_more_memory();
1211 	}
1212 }
1213 
1214 /*
1215  * The relationship between dirty buffers and dirty pages:
1216  *
1217  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1218  * the page is tagged dirty in its radix tree.
1219  *
1220  * At all times, the dirtiness of the buffers represents the dirtiness of
1221  * subsections of the page.  If the page has buffers, the page dirty bit is
1222  * merely a hint about the true dirty state.
1223  *
1224  * When a page is set dirty in its entirety, all its buffers are marked dirty
1225  * (if the page has buffers).
1226  *
1227  * When a buffer is marked dirty, its page is dirtied, but the page's other
1228  * buffers are not.
1229  *
1230  * Also.  When blockdev buffers are explicitly read with bread(), they
1231  * individually become uptodate.  But their backing page remains not
1232  * uptodate - even if all of its buffers are uptodate.  A subsequent
1233  * block_read_full_page() against that page will discover all the uptodate
1234  * buffers, will set the page uptodate and will perform no I/O.
1235  */
1236 
1237 /**
1238  * mark_buffer_dirty - mark a buffer_head as needing writeout
1239  * @bh: the buffer_head to mark dirty
1240  *
1241  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1242  * backing page dirty, then tag the page as dirty in its address_space's radix
1243  * tree and then attach the address_space's inode to its superblock's dirty
1244  * inode list.
1245  *
1246  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1247  * mapping->tree_lock and the global inode_lock.
1248  */
1249 void mark_buffer_dirty(struct buffer_head *bh)
1250 {
1251 	WARN_ON_ONCE(!buffer_uptodate(bh));
1252 
1253 	/*
1254 	 * Very *carefully* optimize the it-is-already-dirty case.
1255 	 *
1256 	 * Don't let the final "is it dirty" escape to before we
1257 	 * perhaps modified the buffer.
1258 	 */
1259 	if (buffer_dirty(bh)) {
1260 		smp_mb();
1261 		if (buffer_dirty(bh))
1262 			return;
1263 	}
1264 
1265 	if (!test_set_buffer_dirty(bh))
1266 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1267 }
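/*
 * Editor's note - illustrative sketch, not from the original file: the
 * usual read-modify-write cycle on a metadata block ('sb', 'blocknr',
 * 'offset', 'src' and 'len' are hypothetical):
 */
#if 0	/* example usage */
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	memcpy(bh->b_data + offset, src, len);
	mark_buffer_dirty(bh);		/* schedule it for writeback */
	brelse(bh);
#endif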
1268 
1269 /*
1270  * Decrement a buffer_head's reference count.  If all buffers against a page
1271  * have zero reference count, are clean and unlocked, and if the page is clean
1272  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1273  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1274  * a page but it ends up not being freed, and buffers may later be reattached).
1275  */
1276 void __brelse(struct buffer_head * buf)
1277 {
1278 	if (atomic_read(&buf->b_count)) {
1279 		put_bh(buf);
1280 		return;
1281 	}
1282 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1283 }
1284 
1285 /*
1286  * bforget() is like brelse(), except it discards any
1287  * potentially dirty data.
1288  */
1289 void __bforget(struct buffer_head *bh)
1290 {
1291 	clear_buffer_dirty(bh);
1292 	if (bh->b_assoc_map) {
1293 		struct address_space *buffer_mapping = bh->b_page->mapping;
1294 
1295 		spin_lock(&buffer_mapping->private_lock);
1296 		list_del_init(&bh->b_assoc_buffers);
1297 		bh->b_assoc_map = NULL;
1298 		spin_unlock(&buffer_mapping->private_lock);
1299 	}
1300 	__brelse(bh);
1301 }
1302 
1303 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1304 {
1305 	lock_buffer(bh);
1306 	if (buffer_uptodate(bh)) {
1307 		unlock_buffer(bh);
1308 		return bh;
1309 	} else {
1310 		get_bh(bh);
1311 		bh->b_end_io = end_buffer_read_sync;
1312 		submit_bh(READ, bh);
1313 		wait_on_buffer(bh);
1314 		if (buffer_uptodate(bh))
1315 			return bh;
1316 	}
1317 	brelse(bh);
1318 	return NULL;
1319 }
1320 
1321 /*
1322  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1323  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1324  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1325  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1326  * CPU's LRUs at the same time.
1327  *
1328  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1329  * sb_find_get_block().
1330  *
1331  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1332  * a local interrupt disable for that.
1333  */
1334 
1335 #define BH_LRU_SIZE	8
1336 
1337 struct bh_lru {
1338 	struct buffer_head *bhs[BH_LRU_SIZE];
1339 };
1340 
1341 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1342 
1343 #ifdef CONFIG_SMP
1344 #define bh_lru_lock()	local_irq_disable()
1345 #define bh_lru_unlock()	local_irq_enable()
1346 #else
1347 #define bh_lru_lock()	preempt_disable()
1348 #define bh_lru_unlock()	preempt_enable()
1349 #endif
1350 
1351 static inline void check_irqs_on(void)
1352 {
1353 #ifdef irqs_disabled
1354 	BUG_ON(irqs_disabled());
1355 #endif
1356 }
1357 
1358 /*
1359  * The LRU management algorithm is dopey-but-simple.  Sorry.
1360  */
1361 static void bh_lru_install(struct buffer_head *bh)
1362 {
1363 	struct buffer_head *evictee = NULL;
1364 	struct bh_lru *lru;
1365 
1366 	check_irqs_on();
1367 	bh_lru_lock();
1368 	lru = &__get_cpu_var(bh_lrus);
1369 	if (lru->bhs[0] != bh) {
1370 		struct buffer_head *bhs[BH_LRU_SIZE];
1371 		int in;
1372 		int out = 0;
1373 
1374 		get_bh(bh);
1375 		bhs[out++] = bh;
1376 		for (in = 0; in < BH_LRU_SIZE; in++) {
1377 			struct buffer_head *bh2 = lru->bhs[in];
1378 
1379 			if (bh2 == bh) {
1380 				__brelse(bh2);
1381 			} else {
1382 				if (out >= BH_LRU_SIZE) {
1383 					BUG_ON(evictee != NULL);
1384 					evictee = bh2;
1385 				} else {
1386 					bhs[out++] = bh2;
1387 				}
1388 			}
1389 		}
1390 		while (out < BH_LRU_SIZE)
1391 			bhs[out++] = NULL;
1392 		memcpy(lru->bhs, bhs, sizeof(bhs));
1393 	}
1394 	bh_lru_unlock();
1395 
1396 	if (evictee)
1397 		__brelse(evictee);
1398 }
1399 
1400 /*
1401  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1402  */
1403 static struct buffer_head *
1404 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1405 {
1406 	struct buffer_head *ret = NULL;
1407 	struct bh_lru *lru;
1408 	unsigned int i;
1409 
1410 	check_irqs_on();
1411 	bh_lru_lock();
1412 	lru = &__get_cpu_var(bh_lrus);
1413 	for (i = 0; i < BH_LRU_SIZE; i++) {
1414 		struct buffer_head *bh = lru->bhs[i];
1415 
1416 		if (bh && bh->b_bdev == bdev &&
1417 				bh->b_blocknr == block && bh->b_size == size) {
1418 			if (i) {
1419 				while (i) {
1420 					lru->bhs[i] = lru->bhs[i - 1];
1421 					i--;
1422 				}
1423 				lru->bhs[0] = bh;
1424 			}
1425 			get_bh(bh);
1426 			ret = bh;
1427 			break;
1428 		}
1429 	}
1430 	bh_lru_unlock();
1431 	return ret;
1432 }
1433 
1434 /*
1435  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1436  * it in the LRU and mark it as accessed.  If it is not present then return
1437  * NULL
1438  */
1439 struct buffer_head *
1440 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1441 {
1442 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1443 
1444 	if (bh == NULL) {
1445 		bh = __find_get_block_slow(bdev, block);
1446 		if (bh)
1447 			bh_lru_install(bh);
1448 	}
1449 	if (bh)
1450 		touch_buffer(bh);
1451 	return bh;
1452 }
1453 EXPORT_SYMBOL(__find_get_block);
1454 
1455 /*
1456  * __getblk will locate (and, if necessary, create) the buffer_head
1457  * which corresponds to the passed block_device, block and size. The
1458  * returned buffer has its reference count incremented.
1459  *
1460  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1461  * illegal block number, __getblk() will happily return a buffer_head
1462  * which represents the non-existent block.  Very weird.
1463  *
1464  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1465  * attempt is failing.  FIXME, perhaps?
1466  */
1467 struct buffer_head *
1468 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1469 {
1470 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1471 
1472 	might_sleep();
1473 	if (bh == NULL)
1474 		bh = __getblk_slow(bdev, block, size);
1475 	return bh;
1476 }
1477 EXPORT_SYMBOL(__getblk);
1478 
1479 /*
1480  * Do async read-ahead on a buffer..
1481  */
1482 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1483 {
1484 	struct buffer_head *bh = __getblk(bdev, block, size);
1485 	if (likely(bh)) {
1486 		ll_rw_block(READA, 1, &bh);
1487 		brelse(bh);
1488 	}
1489 }
1490 EXPORT_SYMBOL(__breadahead);
1491 
1492 /**
1493  *  __bread() - reads a specified block and returns the bh
1494  *  @bdev: the block_device to read from
1495  *  @block: number of block
1496  *  @size: size (in bytes) to read
1497  *
1498  *  Reads a specified block, and returns buffer head that contains it.
1499  *  It returns NULL if the block was unreadable.
1500  */
1501 struct buffer_head *
1502 __bread(struct block_device *bdev, sector_t block, unsigned size)
1503 {
1504 	struct buffer_head *bh = __getblk(bdev, block, size);
1505 
1506 	if (likely(bh) && !buffer_uptodate(bh))
1507 		bh = __bread_slow(bh);
1508 	return bh;
1509 }
1510 EXPORT_SYMBOL(__bread);
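/*
 * Editor's note - illustrative sketch, not from the original file:
 * reading and releasing a single 4k block from a block device:
 */
#if 0	/* example usage */
	struct buffer_head *bh;

	bh = __bread(bdev, block, 4096);
	if (!bh)
		return -EIO;		/* the block was unreadable */
	/* bh->b_data now holds the block's contents */
	brelse(bh);
#endif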
1511 
1512 /*
1513  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1514  * This doesn't race because it runs in each cpu either in irq
1515  * or with preempt disabled.
1516  */
1517 static void invalidate_bh_lru(void *arg)
1518 {
1519 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1520 	int i;
1521 
1522 	for (i = 0; i < BH_LRU_SIZE; i++) {
1523 		brelse(b->bhs[i]);
1524 		b->bhs[i] = NULL;
1525 	}
1526 	put_cpu_var(bh_lrus);
1527 }
1528 
1529 void invalidate_bh_lrus(void)
1530 {
1531 	on_each_cpu(invalidate_bh_lru, NULL, 1);
1532 }
1533 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1534 
1535 void set_bh_page(struct buffer_head *bh,
1536 		struct page *page, unsigned long offset)
1537 {
1538 	bh->b_page = page;
1539 	BUG_ON(offset >= PAGE_SIZE);
1540 	if (PageHighMem(page))
1541 		/*
1542 		 * This catches illegal uses and preserves the offset:
1543 		 */
1544 		bh->b_data = (char *)(0 + offset);
1545 	else
1546 		bh->b_data = page_address(page) + offset;
1547 }
1548 EXPORT_SYMBOL(set_bh_page);
1549 
1550 /*
1551  * Called when truncating a buffer on a page completely.
1552  */
1553 static void discard_buffer(struct buffer_head * bh)
1554 {
1555 	lock_buffer(bh);
1556 	clear_buffer_dirty(bh);
1557 	bh->b_bdev = NULL;
1558 	clear_buffer_mapped(bh);
1559 	clear_buffer_req(bh);
1560 	clear_buffer_new(bh);
1561 	clear_buffer_delay(bh);
1562 	clear_buffer_unwritten(bh);
1563 	unlock_buffer(bh);
1564 }
1565 
1566 /**
1567  * block_invalidatepage - invalidate part or all of a buffer-backed page
1568  *
1569  * @page: the page which is affected
1570  * @offset: the index of the truncation point
1571  *
1572  * block_invalidatepage() is called when all or part of the page has become
1573  * invalidated by a truncate operation.
1574  *
1575  * block_invalidatepage() does not have to release all buffers, but it must
1576  * ensure that no dirty buffer is left outside @offset and that no I/O
1577  * is underway against any of the blocks which are outside the truncation
1578  * point.  Because the caller is about to free (and possibly reuse) those
1579  * blocks on-disk.
1580  */
1581 void block_invalidatepage(struct page *page, unsigned long offset)
1582 {
1583 	struct buffer_head *head, *bh, *next;
1584 	unsigned int curr_off = 0;
1585 
1586 	BUG_ON(!PageLocked(page));
1587 	if (!page_has_buffers(page))
1588 		goto out;
1589 
1590 	head = page_buffers(page);
1591 	bh = head;
1592 	do {
1593 		unsigned int next_off = curr_off + bh->b_size;
1594 		next = bh->b_this_page;
1595 
1596 		/*
1597 		 * is this block fully invalidated?
1598 		 */
1599 		if (offset <= curr_off)
1600 			discard_buffer(bh);
1601 		curr_off = next_off;
1602 		bh = next;
1603 	} while (bh != head);
1604 
1605 	/*
1606 	 * We release buffers only if the entire page is being invalidated.
1607 	 * The get_block cached value has been unconditionally invalidated,
1608 	 * so real IO is not possible anymore.
1609 	 */
1610 	if (offset == 0)
1611 		try_to_release_page(page, 0);
1612 out:
1613 	return;
1614 }
1615 EXPORT_SYMBOL(block_invalidatepage);
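/*
 * Editor's note - illustrative sketch, not from the original file: when
 * an address_space provides no ->invalidatepage method, the truncate
 * path's do_invalidatepage() falls back to block_invalidatepage(); a
 * buffer-backed filesystem may also set it explicitly
 * ('example_blk_aops' is hypothetical):
 */
#if 0	/* example aops entry (other methods omitted) */
static const struct address_space_operations example_blk_aops = {
	.invalidatepage	= block_invalidatepage,
};
#endif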
1616 
1617 /*
1618  * We attach and possibly dirty the buffers atomically wrt
1619  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1620  * is already excluded via the page lock.
1621  */
1622 void create_empty_buffers(struct page *page,
1623 			unsigned long blocksize, unsigned long b_state)
1624 {
1625 	struct buffer_head *bh, *head, *tail;
1626 
1627 	head = alloc_page_buffers(page, blocksize, 1);
1628 	bh = head;
1629 	do {
1630 		bh->b_state |= b_state;
1631 		tail = bh;
1632 		bh = bh->b_this_page;
1633 	} while (bh);
1634 	tail->b_this_page = head;
1635 
1636 	spin_lock(&page->mapping->private_lock);
1637 	if (PageUptodate(page) || PageDirty(page)) {
1638 		bh = head;
1639 		do {
1640 			if (PageDirty(page))
1641 				set_buffer_dirty(bh);
1642 			if (PageUptodate(page))
1643 				set_buffer_uptodate(bh);
1644 			bh = bh->b_this_page;
1645 		} while (bh != head);
1646 	}
1647 	attach_page_buffers(page, head);
1648 	spin_unlock(&page->mapping->private_lock);
1649 }
1650 EXPORT_SYMBOL(create_empty_buffers);
1651 
1652 /*
1653  * We are taking a block for data and we don't want any output from any
1654  * buffer-cache aliases starting from return from that function and
1655  * until the moment when something will explicitly mark the buffer
1656  * dirty (hopefully that will not happen until we will free that block ;-)
1657  * We don't even need to mark it not-uptodate - nobody can expect
1658  * anything from a newly allocated buffer anyway. We used to use
1659  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1660  * don't want to mark the alias unmapped, for example - it would confuse
1661  * anyone who might pick it with bread() afterwards...
1662  *
1663  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1664  * be writeout I/O going on against recently-freed buffers.  We don't
1665  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1666  * only if we really need to.  That happens here.
1667  */
1668 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1669 {
1670 	struct buffer_head *old_bh;
1671 
1672 	might_sleep();
1673 
1674 	old_bh = __find_get_block_slow(bdev, block);
1675 	if (old_bh) {
1676 		clear_buffer_dirty(old_bh);
1677 		wait_on_buffer(old_bh);
1678 		clear_buffer_req(old_bh);
1679 		__brelse(old_bh);
1680 	}
1681 }
1682 EXPORT_SYMBOL(unmap_underlying_metadata);
1683 
1684 /*
1685  * NOTE! All mapped/uptodate combinations are valid:
1686  *
1687  *	Mapped	Uptodate	Meaning
1688  *
1689  *	No	No		"unknown" - must do get_block()
1690  *	No	Yes		"hole" - zero-filled
1691  *	Yes	No		"allocated" - allocated on disk, not read in
1692  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1693  *
1694  * "Dirty" is valid only with the last case (mapped+uptodate).
1695  */
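/*
 * Editor's note - illustrative sketch, not from the original file: the
 * table above translates into the usual per-block decision made by the
 * read/write helpers in this file:
 */
#if 0	/* example decision logic */
	if (!buffer_mapped(bh)) {
		err = get_block(inode, iblock, bh, create);
		if (err)
			goto out;
	}
	if (!buffer_uptodate(bh)) {
		if (buffer_mapped(bh)) {
			/* "allocated": read the block in from disk */
		} else {
			/* "hole": treat the contents as zeroes */
		}
	}
#endif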
1696 
1697 /*
1698  * While block_write_full_page is writing back the dirty buffers under
1699  * the page lock, whoever dirtied the buffers may decide to clean them
1700  * again at any time.  We handle that by only looking at the buffer
1701  * state inside lock_buffer().
1702  *
1703  * If block_write_full_page() is called for regular writeback
1704  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1705  * locked buffer.   This can only happen if someone has written the buffer
1706  * directly, with submit_bh().  At the address_space level PageWriteback
1707  * prevents this contention from occurring.
1708  */
1709 static int __block_write_full_page(struct inode *inode, struct page *page,
1710 			get_block_t *get_block, struct writeback_control *wbc)
1711 {
1712 	int err;
1713 	sector_t block;
1714 	sector_t last_block;
1715 	struct buffer_head *bh, *head;
1716 	const unsigned blocksize = 1 << inode->i_blkbits;
1717 	int nr_underway = 0;
1718 
1719 	BUG_ON(!PageLocked(page));
1720 
1721 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1722 
1723 	if (!page_has_buffers(page)) {
1724 		create_empty_buffers(page, blocksize,
1725 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1726 	}
1727 
1728 	/*
1729 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1730 	 * here, and the (potentially unmapped) buffers may become dirty at
1731 	 * any time.  If a buffer becomes dirty here after we've inspected it
1732 	 * then we just miss that fact, and the page stays dirty.
1733 	 *
1734 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1735 	 * handle that here by just cleaning them.
1736 	 */
1737 
1738 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1739 	head = page_buffers(page);
1740 	bh = head;
1741 
1742 	/*
1743 	 * Get all the dirty buffers mapped to disk addresses and
1744 	 * handle any aliases from the underlying blockdev's mapping.
1745 	 */
1746 	do {
1747 		if (block > last_block) {
1748 			/*
1749 			 * mapped buffers outside i_size will occur, because
1750 			 * this page can be outside i_size when there is a
1751 			 * truncate in progress.
1752 			 */
1753 			/*
1754 			 * The buffer was zeroed by block_write_full_page()
1755 			 */
1756 			clear_buffer_dirty(bh);
1757 			set_buffer_uptodate(bh);
1758 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1759 			   buffer_dirty(bh)) {
1760 			WARN_ON(bh->b_size != blocksize);
1761 			err = get_block(inode, block, bh, 1);
1762 			if (err)
1763 				goto recover;
1764 			clear_buffer_delay(bh);
1765 			if (buffer_new(bh)) {
1766 				/* blockdev mappings never come here */
1767 				clear_buffer_new(bh);
1768 				unmap_underlying_metadata(bh->b_bdev,
1769 							bh->b_blocknr);
1770 			}
1771 		}
1772 		bh = bh->b_this_page;
1773 		block++;
1774 	} while (bh != head);
1775 
1776 	do {
1777 		if (!buffer_mapped(bh))
1778 			continue;
1779 		/*
1780 		 * If it's a fully non-blocking write attempt and we cannot
1781 		 * lock the buffer then redirty the page.  Note that this can
1782 		 * potentially cause a busy-wait loop from pdflush and kswapd
1783 		 * activity, but those code paths have their own higher-level
1784 		 * throttling.
1785 		 */
1786 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1787 			lock_buffer(bh);
1788 		} else if (!trylock_buffer(bh)) {
1789 			redirty_page_for_writepage(wbc, page);
1790 			continue;
1791 		}
1792 		if (test_clear_buffer_dirty(bh)) {
1793 			mark_buffer_async_write(bh);
1794 		} else {
1795 			unlock_buffer(bh);
1796 		}
1797 	} while ((bh = bh->b_this_page) != head);
1798 
1799 	/*
1800 	 * The page and its buffers are protected by PageWriteback(), so we can
1801 	 * drop the bh refcounts early.
1802 	 */
1803 	BUG_ON(PageWriteback(page));
1804 	set_page_writeback(page);
1805 
1806 	do {
1807 		struct buffer_head *next = bh->b_this_page;
1808 		if (buffer_async_write(bh)) {
1809 			submit_bh(WRITE, bh);
1810 			nr_underway++;
1811 		}
1812 		bh = next;
1813 	} while (bh != head);
1814 	unlock_page(page);
1815 
1816 	err = 0;
1817 done:
1818 	if (nr_underway == 0) {
1819 		/*
1820 		 * The page was marked dirty, but the buffers were
1821 		 * clean.  Someone wrote them back by hand with
1822 		 * ll_rw_block/submit_bh.  A rare case.
1823 		 */
1824 		end_page_writeback(page);
1825 
1826 		/*
1827 		 * The page and buffer_heads can be released at any time from
1828 		 * here on.
1829 		 */
1830 	}
1831 	return err;
1832 
1833 recover:
1834 	/*
1835 	 * ENOSPC, or some other error.  We may already have added some
1836 	 * blocks to the file, so we need to write these out to avoid
1837 	 * exposing stale data.
1838 	 * The page is currently locked and not marked for writeback
1839 	 * The page is currently locked and not marked for writeback.
1840 	bh = head;
1841 	/* Recovery: lock and submit the mapped buffers */
1842 	do {
1843 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1844 		    !buffer_delay(bh)) {
1845 			lock_buffer(bh);
1846 			mark_buffer_async_write(bh);
1847 		} else {
1848 			/*
1849 			 * The buffer may have been set dirty during
1850 			 * attachment to a dirty page.
1851 			 */
1852 			clear_buffer_dirty(bh);
1853 		}
1854 	} while ((bh = bh->b_this_page) != head);
1855 	SetPageError(page);
1856 	BUG_ON(PageWriteback(page));
1857 	mapping_set_error(page->mapping, err);
1858 	set_page_writeback(page);
1859 	do {
1860 		struct buffer_head *next = bh->b_this_page;
1861 		if (buffer_async_write(bh)) {
1862 			clear_buffer_dirty(bh);
1863 			submit_bh(WRITE, bh);
1864 			nr_underway++;
1865 		}
1866 		bh = next;
1867 	} while (bh != head);
1868 	unlock_page(page);
1869 	goto done;
1870 }
1871 
1872 /*
1873  * If a page has any new buffers, zero them out here, and mark them uptodate
1874  * and dirty so they'll be written out (in order to prevent uninitialised
1875  * block data from leaking). And clear the new bit.
1876  */
1877 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1878 {
1879 	unsigned int block_start, block_end;
1880 	struct buffer_head *head, *bh;
1881 
1882 	BUG_ON(!PageLocked(page));
1883 	if (!page_has_buffers(page))
1884 		return;
1885 
1886 	bh = head = page_buffers(page);
1887 	block_start = 0;
1888 	do {
1889 		block_end = block_start + bh->b_size;
1890 
1891 		if (buffer_new(bh)) {
1892 			if (block_end > from && block_start < to) {
1893 				if (!PageUptodate(page)) {
1894 					unsigned start, size;
1895 
1896 					start = max(from, block_start);
1897 					size = min(to, block_end) - start;
1898 
1899 					zero_user(page, start, size);
1900 					set_buffer_uptodate(bh);
1901 				}
1902 
1903 				clear_buffer_new(bh);
1904 				mark_buffer_dirty(bh);
1905 			}
1906 		}
1907 
1908 		block_start = block_end;
1909 		bh = bh->b_this_page;
1910 	} while (bh != head);
1911 }
1912 EXPORT_SYMBOL(page_zero_new_buffers);
1913 
1914 static int __block_prepare_write(struct inode *inode, struct page *page,
1915 		unsigned from, unsigned to, get_block_t *get_block)
1916 {
1917 	unsigned block_start, block_end;
1918 	sector_t block;
1919 	int err = 0;
1920 	unsigned blocksize, bbits;
1921 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1922 
1923 	BUG_ON(!PageLocked(page));
1924 	BUG_ON(from > PAGE_CACHE_SIZE);
1925 	BUG_ON(to > PAGE_CACHE_SIZE);
1926 	BUG_ON(from > to);
1927 
1928 	blocksize = 1 << inode->i_blkbits;
1929 	if (!page_has_buffers(page))
1930 		create_empty_buffers(page, blocksize, 0);
1931 	head = page_buffers(page);
1932 
1933 	bbits = inode->i_blkbits;
1934 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1935 
1936 	for (bh = head, block_start = 0; bh != head || !block_start;
1937 	    block++, block_start = block_end, bh = bh->b_this_page) {
1938 		block_end = block_start + blocksize;
1939 		if (block_end <= from || block_start >= to) {
1940 			if (PageUptodate(page)) {
1941 				if (!buffer_uptodate(bh))
1942 					set_buffer_uptodate(bh);
1943 			}
1944 			continue;
1945 		}
1946 		if (buffer_new(bh))
1947 			clear_buffer_new(bh);
1948 		if (!buffer_mapped(bh)) {
1949 			WARN_ON(bh->b_size != blocksize);
1950 			err = get_block(inode, block, bh, 1);
1951 			if (err)
1952 				break;
1953 			if (buffer_new(bh)) {
1954 				unmap_underlying_metadata(bh->b_bdev,
1955 							bh->b_blocknr);
1956 				if (PageUptodate(page)) {
1957 					clear_buffer_new(bh);
1958 					set_buffer_uptodate(bh);
1959 					mark_buffer_dirty(bh);
1960 					continue;
1961 				}
1962 				if (block_end > to || block_start < from)
1963 					zero_user_segments(page,
1964 						to, block_end,
1965 						block_start, from);
1966 				continue;
1967 			}
1968 		}
1969 		if (PageUptodate(page)) {
1970 			if (!buffer_uptodate(bh))
1971 				set_buffer_uptodate(bh);
1972 			continue;
1973 		}
1974 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1975 		    !buffer_unwritten(bh) &&
1976 		     (block_start < from || block_end > to)) {
1977 			ll_rw_block(READ, 1, &bh);
1978 			*wait_bh++=bh;
1979 		}
1980 	}
1981 	/*
1982 	 * If we issued read requests - let them complete.
1983 	 */
1984 	while (wait_bh > wait) {
1985 		wait_on_buffer(*--wait_bh);
1986 		if (!buffer_uptodate(*wait_bh))
1987 			err = -EIO;
1988 	}
1989 	if (unlikely(err))
1990 		page_zero_new_buffers(page, from, to);
1991 	return err;
1992 }
1993 
1994 static int __block_commit_write(struct inode *inode, struct page *page,
1995 		unsigned from, unsigned to)
1996 {
1997 	unsigned block_start, block_end;
1998 	int partial = 0;
1999 	unsigned blocksize;
2000 	struct buffer_head *bh, *head;
2001 
2002 	blocksize = 1 << inode->i_blkbits;
2003 
2004 	for (bh = head = page_buffers(page), block_start = 0;
2005 	    bh != head || !block_start;
2006 	    block_start = block_end, bh = bh->b_this_page) {
2007 		block_end = block_start + blocksize;
2008 		if (block_end <= from || block_start >= to) {
2009 			if (!buffer_uptodate(bh))
2010 				partial = 1;
2011 		} else {
2012 			set_buffer_uptodate(bh);
2013 			mark_buffer_dirty(bh);
2014 		}
2015 		clear_buffer_new(bh);
2016 	}
2017 
2018 	/*
2019 	 * If this is a partial write which happened to make all buffers
2020 	 * uptodate then we can optimize away a bogus readpage() for
2021 	 * the next read(). Here we 'discover' whether the page went
2022 	 * uptodate as a result of this (potentially partial) write.
2023 	 */
2024 	if (!partial)
2025 		SetPageUptodate(page);
2026 	return 0;
2027 }
2028 
2029 /*
2030  * block_write_begin takes care of the basic task of block allocation and
2031  * bringing partial write blocks uptodate first.
2032  *
2033  * If *pagep is not NULL, then block_write_begin uses the locked page
2034  * at *pagep rather than allocating its own. In this case, the page will
2035  * not be unlocked or deallocated on failure.
2036  */
2037 int block_write_begin(struct file *file, struct address_space *mapping,
2038 			loff_t pos, unsigned len, unsigned flags,
2039 			struct page **pagep, void **fsdata,
2040 			get_block_t *get_block)
2041 {
2042 	struct inode *inode = mapping->host;
2043 	int status = 0;
2044 	struct page *page;
2045 	pgoff_t index;
2046 	unsigned start, end;
2047 	int ownpage = 0;
2048 
2049 	index = pos >> PAGE_CACHE_SHIFT;
2050 	start = pos & (PAGE_CACHE_SIZE - 1);
2051 	end = start + len;
2052 
2053 	page = *pagep;
2054 	if (page == NULL) {
2055 		ownpage = 1;
2056 		page = grab_cache_page_write_begin(mapping, index, flags);
2057 		if (!page) {
2058 			status = -ENOMEM;
2059 			goto out;
2060 		}
2061 		*pagep = page;
2062 	} else
2063 		BUG_ON(!PageLocked(page));
2064 
2065 	status = __block_prepare_write(inode, page, start, end, get_block);
2066 	if (unlikely(status)) {
2067 		ClearPageUptodate(page);
2068 
2069 		if (ownpage) {
2070 			unlock_page(page);
2071 			page_cache_release(page);
2072 			*pagep = NULL;
2073 
2074 			/*
2075 			 * prepare_write() may have instantiated a few blocks
2076 			 * outside i_size.  Trim these off again. Don't need
2077 			 * i_size_read because we hold i_mutex.
2078 			 */
2079 			if (pos + len > inode->i_size)
2080 				vmtruncate(inode, inode->i_size);
2081 		}
2082 	}
2083 
2084 out:
2085 	return status;
2086 }
2087 EXPORT_SYMBOL(block_write_begin);
2088 
2089 int block_write_end(struct file *file, struct address_space *mapping,
2090 			loff_t pos, unsigned len, unsigned copied,
2091 			struct page *page, void *fsdata)
2092 {
2093 	struct inode *inode = mapping->host;
2094 	unsigned start;
2095 
2096 	start = pos & (PAGE_CACHE_SIZE - 1);
2097 
2098 	if (unlikely(copied < len)) {
2099 		/*
2100 		 * The buffers that were written will now be uptodate, so we
2101 		 * don't have to worry about a readpage reading them and
2102 		 * overwriting a partial write. However if we have encountered
2103 		 * a short write and only partially written into a buffer, it
2104 		 * will not be marked uptodate, so a readpage might come in and
2105 		 * destroy our partial write.
2106 		 *
2107 		 * Do the simplest thing, and just treat any short write to a
2108 		 * non uptodate page as a zero-length write, and force the
2109 		 * caller to redo the whole thing.
2110 		 */
2111 		if (!PageUptodate(page))
2112 			copied = 0;
2113 
2114 		page_zero_new_buffers(page, start+copied, start+len);
2115 	}
2116 	flush_dcache_page(page);
2117 
2118 	/* This could be a short (even 0-length) commit */
2119 	__block_commit_write(inode, page, start, start+copied);
2120 
2121 	return copied;
2122 }
2123 EXPORT_SYMBOL(block_write_end);
2124 
2125 int generic_write_end(struct file *file, struct address_space *mapping,
2126 			loff_t pos, unsigned len, unsigned copied,
2127 			struct page *page, void *fsdata)
2128 {
2129 	struct inode *inode = mapping->host;
2130 	int i_size_changed = 0;
2131 
2132 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2133 
2134 	/*
2135 	 * No need to use i_size_read() here, the i_size
2136 	 * cannot change under us because we hold i_mutex.
2137 	 *
2138 	 * But it's important to update i_size while still holding page lock:
2139 	 * page writeout could otherwise come in and zero beyond i_size.
2140 	 */
2141 	if (pos+copied > inode->i_size) {
2142 		i_size_write(inode, pos+copied);
2143 		i_size_changed = 1;
2144 	}
2145 
2146 	unlock_page(page);
2147 	page_cache_release(page);
2148 
2149 	/*
2150 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2151 	 * makes the holding time of page lock longer. Second, it forces lock
2152 	 * ordering of page lock and transaction start for journaling
2153 	 * filesystems.
2154 	 */
2155 	if (i_size_changed)
2156 		mark_inode_dirty(inode);
2157 
2158 	return copied;
2159 }
2160 EXPORT_SYMBOL(generic_write_end);
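
/*
 * Illustrative sketch (not part of buffer.c): a minimal ->write_begin /
 * ->write_end pair for a filesystem that simply delegates to
 * block_write_begin() and generic_write_end().  examplefs_get_block,
 * examplefs_write_begin and examplefs_aops are hypothetical names; a real
 * filesystem supplies its own block-mapping callback and remaining methods.
 */
static int examplefs_get_block(struct inode *, sector_t,
				struct buffer_head *, int);	/* hypothetical */

static int examplefs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin() grab and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
					pagep, fsdata, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops = {
	.write_begin	= examplefs_write_begin,
	.write_end	= generic_write_end,
	/* readpage, writepage, etc. omitted from this sketch */
};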
2161 
2162 /*
2163  * block_is_partially_uptodate checks whether buffers within a page are
2164  * uptodate or not.
2165  *
2166  * Returns true if all buffers which correspond to a file portion
2167  * we want to read are uptodate.
2168  */
2169 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2170 					unsigned long from)
2171 {
2172 	struct inode *inode = page->mapping->host;
2173 	unsigned block_start, block_end, blocksize;
2174 	unsigned to;
2175 	struct buffer_head *bh, *head;
2176 	int ret = 1;
2177 
2178 	if (!page_has_buffers(page))
2179 		return 0;
2180 
2181 	blocksize = 1 << inode->i_blkbits;
2182 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2183 	to = from + to;
2184 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2185 		return 0;
2186 
2187 	head = page_buffers(page);
2188 	bh = head;
2189 	block_start = 0;
2190 	do {
2191 		block_end = block_start + blocksize;
2192 		if (block_end > from && block_start < to) {
2193 			if (!buffer_uptodate(bh)) {
2194 				ret = 0;
2195 				break;
2196 			}
2197 			if (block_end >= to)
2198 				break;
2199 		}
2200 		block_start = block_end;
2201 		bh = bh->b_this_page;
2202 	} while (bh != head);
2203 
2204 	return ret;
2205 }
2206 EXPORT_SYMBOL(block_is_partially_uptodate);
2207 
2208 /*
2209  * Generic "read page" function for block devices that have the normal
2210  * get_block functionality. This covers most of the block device filesystems.
2211  * Reads the page asynchronously --- the unlock_buffer() and
2212  * set/clear_buffer_uptodate() functions propagate buffer state into the
2213  * page struct once IO has completed.
2214  */
2215 int block_read_full_page(struct page *page, get_block_t *get_block)
2216 {
2217 	struct inode *inode = page->mapping->host;
2218 	sector_t iblock, lblock;
2219 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2220 	unsigned int blocksize;
2221 	int nr, i;
2222 	int fully_mapped = 1;
2223 
2224 	BUG_ON(!PageLocked(page));
2225 	blocksize = 1 << inode->i_blkbits;
2226 	if (!page_has_buffers(page))
2227 		create_empty_buffers(page, blocksize, 0);
2228 	head = page_buffers(page);
2229 
2230 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2231 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2232 	bh = head;
2233 	nr = 0;
2234 	i = 0;
2235 
2236 	do {
2237 		if (buffer_uptodate(bh))
2238 			continue;
2239 
2240 		if (!buffer_mapped(bh)) {
2241 			int err = 0;
2242 
2243 			fully_mapped = 0;
2244 			if (iblock < lblock) {
2245 				WARN_ON(bh->b_size != blocksize);
2246 				err = get_block(inode, iblock, bh, 0);
2247 				if (err)
2248 					SetPageError(page);
2249 			}
2250 			if (!buffer_mapped(bh)) {
2251 				zero_user(page, i * blocksize, blocksize);
2252 				if (!err)
2253 					set_buffer_uptodate(bh);
2254 				continue;
2255 			}
2256 			/*
2257 			 * get_block() might have updated the buffer
2258 			 * synchronously
2259 			 */
2260 			if (buffer_uptodate(bh))
2261 				continue;
2262 		}
2263 		arr[nr++] = bh;
2264 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2265 
2266 	if (fully_mapped)
2267 		SetPageMappedToDisk(page);
2268 
2269 	if (!nr) {
2270 		/*
2271 		 * All buffers are uptodate - we can set the page uptodate
2272 		 * as well. But not if get_block() returned an error.
2273 		 */
2274 		if (!PageError(page))
2275 			SetPageUptodate(page);
2276 		unlock_page(page);
2277 		return 0;
2278 	}
2279 
2280 	/* Stage two: lock the buffers */
2281 	for (i = 0; i < nr; i++) {
2282 		bh = arr[i];
2283 		lock_buffer(bh);
2284 		mark_buffer_async_read(bh);
2285 	}
2286 
2287 	/*
2288 	 * Stage 3: start the IO.  Check for uptodateness
2289 	 * inside the buffer lock in case another process reading
2290 	 * the underlying blockdev brought it uptodate (the sct fix).
2291 	 */
2292 	for (i = 0; i < nr; i++) {
2293 		bh = arr[i];
2294 		if (buffer_uptodate(bh))
2295 			end_buffer_async_read(bh, 1);
2296 		else
2297 			submit_bh(READ, bh);
2298 	}
2299 	return 0;
2300 }
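
/*
 * Illustrative sketch (not part of buffer.c): a filesystem ->readpage that
 * hands all the work to block_read_full_page().  examplefs_get_block and
 * examplefs_readpage are hypothetical names.
 */
static int examplefs_get_block(struct inode *, sector_t,
				struct buffer_head *, int);	/* hypothetical */

static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}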
2301 
2302 /* utility function for filesystems that need to do work on expanding
2303  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2304  * deal with the hole.
2305  */
2306 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2307 {
2308 	struct address_space *mapping = inode->i_mapping;
2309 	struct page *page;
2310 	void *fsdata;
2311 	unsigned long limit;
2312 	int err;
2313 
2314 	err = -EFBIG;
2315 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2316 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2317 		send_sig(SIGXFSZ, current, 0);
2318 		goto out;
2319 	}
2320 	if (size > inode->i_sb->s_maxbytes)
2321 		goto out;
2322 
2323 	err = pagecache_write_begin(NULL, mapping, size, 0,
2324 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2325 				&page, &fsdata);
2326 	if (err)
2327 		goto out;
2328 
2329 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2330 	BUG_ON(err > 0);
2331 
2332 out:
2333 	return err;
2334 }
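
/*
 * Illustrative sketch (not part of buffer.c): growing a file by zero-filling
 * up to the new size with generic_cont_expand_simple(), e.g. from an
 * expanding-truncate path.  examplefs_grow() is a hypothetical helper.
 */
static int examplefs_grow(struct inode *inode, loff_t new_size)
{
	int err = 0;

	if (new_size > i_size_read(inode))
		err = generic_cont_expand_simple(inode, new_size);
	return err;
}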
2335 
2336 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2337 			    loff_t pos, loff_t *bytes)
2338 {
2339 	struct inode *inode = mapping->host;
2340 	unsigned blocksize = 1 << inode->i_blkbits;
2341 	struct page *page;
2342 	void *fsdata;
2343 	pgoff_t index, curidx;
2344 	loff_t curpos;
2345 	unsigned zerofrom, offset, len;
2346 	int err = 0;
2347 
2348 	index = pos >> PAGE_CACHE_SHIFT;
2349 	offset = pos & ~PAGE_CACHE_MASK;
2350 
2351 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2352 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2353 		if (zerofrom & (blocksize-1)) {
2354 			*bytes |= (blocksize-1);
2355 			(*bytes)++;
2356 		}
2357 		len = PAGE_CACHE_SIZE - zerofrom;
2358 
2359 		err = pagecache_write_begin(file, mapping, curpos, len,
2360 						AOP_FLAG_UNINTERRUPTIBLE,
2361 						&page, &fsdata);
2362 		if (err)
2363 			goto out;
2364 		zero_user(page, zerofrom, len);
2365 		err = pagecache_write_end(file, mapping, curpos, len, len,
2366 						page, fsdata);
2367 		if (err < 0)
2368 			goto out;
2369 		BUG_ON(err != len);
2370 		err = 0;
2371 
2372 		balance_dirty_pages_ratelimited(mapping);
2373 	}
2374 
2375 	/* page covers the boundary, find the boundary offset */
2376 	if (index == curidx) {
2377 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2378 		/* if we are expanding the file, the last block will be filled */
2379 		if (offset <= zerofrom) {
2380 			goto out;
2381 		}
2382 		if (zerofrom & (blocksize-1)) {
2383 			*bytes |= (blocksize-1);
2384 			(*bytes)++;
2385 		}
2386 		len = offset - zerofrom;
2387 
2388 		err = pagecache_write_begin(file, mapping, curpos, len,
2389 						AOP_FLAG_UNINTERRUPTIBLE,
2390 						&page, &fsdata);
2391 		if (err)
2392 			goto out;
2393 		zero_user(page, zerofrom, len);
2394 		err = pagecache_write_end(file, mapping, curpos, len, len,
2395 						page, fsdata);
2396 		if (err < 0)
2397 			goto out;
2398 		BUG_ON(err != len);
2399 		err = 0;
2400 	}
2401 out:
2402 	return err;
2403 }
2404 
2405 /*
2406  * For moronic filesystems that do not allow holes in a file.
2407  * We may have to extend the file.
2408  */
2409 int cont_write_begin(struct file *file, struct address_space *mapping,
2410 			loff_t pos, unsigned len, unsigned flags,
2411 			struct page **pagep, void **fsdata,
2412 			get_block_t *get_block, loff_t *bytes)
2413 {
2414 	struct inode *inode = mapping->host;
2415 	unsigned blocksize = 1 << inode->i_blkbits;
2416 	unsigned zerofrom;
2417 	int err;
2418 
2419 	err = cont_expand_zero(file, mapping, pos, bytes);
2420 	if (err)
2421 		goto out;
2422 
2423 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2424 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2425 		*bytes |= (blocksize-1);
2426 		(*bytes)++;
2427 	}
2428 
2429 	*pagep = NULL;
2430 	err = block_write_begin(file, mapping, pos, len,
2431 				flags, pagep, fsdata, get_block);
2432 out:
2433 	return err;
2434 }
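
/*
 * Illustrative sketch (not part of buffer.c): a ->write_begin for a
 * filesystem that cannot represent holes, so the range between the old end
 * of data and the write position must be zero-filled first.  The
 * examplefs_sb_info structure, its data_end cursor and examplefs_get_block
 * are hypothetical; a real filesystem keeps its own "bytes initialised so
 * far" counter.
 */
struct examplefs_sb_info {
	loff_t data_end;	/* highest byte initialised so far */
};

static int examplefs_get_block(struct inode *, sector_t,
				struct buffer_head *, int);	/* hypothetical */

static int examplefs_cont_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct examplefs_sb_info *sbi = mapping->host->i_sb->s_fs_info;

	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				examplefs_get_block, &sbi->data_end);
}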
2435 
2436 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2437 			get_block_t *get_block)
2438 {
2439 	struct inode *inode = page->mapping->host;
2440 	int err = __block_prepare_write(inode, page, from, to, get_block);
2441 	if (err)
2442 		ClearPageUptodate(page);
2443 	return err;
2444 }
2445 
2446 int block_commit_write(struct page *page, unsigned from, unsigned to)
2447 {
2448 	struct inode *inode = page->mapping->host;
2449 	__block_commit_write(inode,page,from,to);
2450 	return 0;
2451 }
2452 
2453 /*
2454  * block_page_mkwrite() is not allowed to change the file size as it gets
2455  * called from a page fault handler when a page is first dirtied. Hence we must
2456  * be careful to check for EOF conditions here. We set the page up correctly
2457  * for a written page which means we get ENOSPC checking when writing into
2458  * holes and correct delalloc and unwritten extent mapping on filesystems that
2459  * support these features.
2460  *
2461  * We are not allowed to take the i_mutex here so we have to play games to
2462  * protect against truncate races as the page could now be beyond EOF.  Because
2463  * vmtruncate() writes the inode size before removing pages, once we have the
2464  * page lock we can determine safely if the page is beyond EOF. If it is not
2465  * beyond EOF, then the page is guaranteed safe against truncation until we
2466  * unlock the page.
2467  */
2468 int
2469 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2470 		   get_block_t get_block)
2471 {
2472 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2473 	unsigned long end;
2474 	loff_t size;
2475 	int ret = -EINVAL;
2476 
2477 	lock_page(page);
2478 	size = i_size_read(inode);
2479 	if ((page->mapping != inode->i_mapping) ||
2480 	    (page_offset(page) > size)) {
2481 		/* page got truncated out from underneath us */
2482 		goto out_unlock;
2483 	}
2484 
2485 	/* page is wholly or partially inside EOF */
2486 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2487 		end = size & ~PAGE_CACHE_MASK;
2488 	else
2489 		end = PAGE_CACHE_SIZE;
2490 
2491 	ret = block_prepare_write(page, 0, end, get_block);
2492 	if (!ret)
2493 		ret = block_commit_write(page, 0, end);
2494 
2495 out_unlock:
2496 	unlock_page(page);
2497 	return ret;
2498 }
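
/*
 * Illustrative sketch (not part of buffer.c): wiring block_page_mkwrite()
 * into a filesystem's ->page_mkwrite handler so writable mmaps get blocks
 * allocated (and ENOSPC reported) at fault time.  examplefs_get_block is
 * hypothetical, and the (vma, page) callback signature is assumed from
 * kernels of this vintage; later kernels pass a struct vm_fault instead.
 */
static int examplefs_get_block(struct inode *, sector_t,
				struct buffer_head *, int);	/* hypothetical */

static int examplefs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, examplefs_get_block);
}

static struct vm_operations_struct examplefs_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= examplefs_page_mkwrite,
};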
2499 
2500 /*
2501  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2502  * immediately, while under the page lock.  So it needs a special end_io
2503  * handler which does not touch the bh after unlocking it.
2504  */
2505 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2506 {
2507 	__end_buffer_read_notouch(bh, uptodate);
2508 }
2509 
2510 /*
2511  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2512  * the page (converting it to circular linked list and taking care of page
2513  * dirty races).
2514  */
2515 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2516 {
2517 	struct buffer_head *bh;
2518 
2519 	BUG_ON(!PageLocked(page));
2520 
2521 	spin_lock(&page->mapping->private_lock);
2522 	bh = head;
2523 	do {
2524 		if (PageDirty(page))
2525 			set_buffer_dirty(bh);
2526 		if (!bh->b_this_page)
2527 			bh->b_this_page = head;
2528 		bh = bh->b_this_page;
2529 	} while (bh != head);
2530 	attach_page_buffers(page, head);
2531 	spin_unlock(&page->mapping->private_lock);
2532 }
2533 
2534 /*
2535  * On entry, no part of the page is uptodate.
2536  * On exit the page is fully uptodate in the areas outside (from,to)
2537  */
2538 int nobh_write_begin(struct file *file, struct address_space *mapping,
2539 			loff_t pos, unsigned len, unsigned flags,
2540 			struct page **pagep, void **fsdata,
2541 			get_block_t *get_block)
2542 {
2543 	struct inode *inode = mapping->host;
2544 	const unsigned blkbits = inode->i_blkbits;
2545 	const unsigned blocksize = 1 << blkbits;
2546 	struct buffer_head *head, *bh;
2547 	struct page *page;
2548 	pgoff_t index;
2549 	unsigned from, to;
2550 	unsigned block_in_page;
2551 	unsigned block_start, block_end;
2552 	sector_t block_in_file;
2553 	int nr_reads = 0;
2554 	int ret = 0;
2555 	int is_mapped_to_disk = 1;
2556 
2557 	index = pos >> PAGE_CACHE_SHIFT;
2558 	from = pos & (PAGE_CACHE_SIZE - 1);
2559 	to = from + len;
2560 
2561 	page = grab_cache_page_write_begin(mapping, index, flags);
2562 	if (!page)
2563 		return -ENOMEM;
2564 	*pagep = page;
2565 	*fsdata = NULL;
2566 
2567 	if (page_has_buffers(page)) {
2568 		unlock_page(page);
2569 		page_cache_release(page);
2570 		*pagep = NULL;
2571 		return block_write_begin(file, mapping, pos, len, flags, pagep,
2572 					fsdata, get_block);
2573 	}
2574 
2575 	if (PageMappedToDisk(page))
2576 		return 0;
2577 
2578 	/*
2579 	 * Allocate buffers so that we can keep track of state, and potentially
2580 	 * attach them to the page if an error occurs. In the common case of
2581 	 * no error, they will just be freed again without ever being attached
2582 	 * to the page (which is all OK, because we're under the page lock).
2583 	 *
2584 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2585 	 * than the circular one we're used to.
2586 	 */
2587 	head = alloc_page_buffers(page, blocksize, 0);
2588 	if (!head) {
2589 		ret = -ENOMEM;
2590 		goto out_release;
2591 	}
2592 
2593 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2594 
2595 	/*
2596 	 * We loop across all blocks in the page, whether or not they are
2597 	 * part of the affected region.  This is so we can discover if the
2598 	 * page is fully mapped-to-disk.
2599 	 */
2600 	for (block_start = 0, block_in_page = 0, bh = head;
2601 		  block_start < PAGE_CACHE_SIZE;
2602 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2603 		int create;
2604 
2605 		block_end = block_start + blocksize;
2606 		bh->b_state = 0;
2607 		create = 1;
2608 		if (block_start >= to)
2609 			create = 0;
2610 		ret = get_block(inode, block_in_file + block_in_page,
2611 					bh, create);
2612 		if (ret)
2613 			goto failed;
2614 		if (!buffer_mapped(bh))
2615 			is_mapped_to_disk = 0;
2616 		if (buffer_new(bh))
2617 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2618 		if (PageUptodate(page)) {
2619 			set_buffer_uptodate(bh);
2620 			continue;
2621 		}
2622 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2623 			zero_user_segments(page, block_start, from,
2624 							to, block_end);
2625 			continue;
2626 		}
2627 		if (buffer_uptodate(bh))
2628 			continue;	/* reiserfs does this */
2629 		if (block_start < from || block_end > to) {
2630 			lock_buffer(bh);
2631 			bh->b_end_io = end_buffer_read_nobh;
2632 			submit_bh(READ, bh);
2633 			nr_reads++;
2634 		}
2635 	}
2636 
2637 	if (nr_reads) {
2638 		/*
2639 		 * The page is locked, so these buffers are protected from
2640 		 * any VM or truncate activity.  Hence we don't need to care
2641 		 * for the buffer_head refcounts.
2642 		 */
2643 		for (bh = head; bh; bh = bh->b_this_page) {
2644 			wait_on_buffer(bh);
2645 			if (!buffer_uptodate(bh))
2646 				ret = -EIO;
2647 		}
2648 		if (ret)
2649 			goto failed;
2650 	}
2651 
2652 	if (is_mapped_to_disk)
2653 		SetPageMappedToDisk(page);
2654 
2655 	*fsdata = head; /* to be released by nobh_write_end */
2656 
2657 	return 0;
2658 
2659 failed:
2660 	BUG_ON(!ret);
2661 	/*
2662 	 * Error recovery is a bit difficult. We need to zero out blocks that
2663 	 * were newly allocated, and dirty them to ensure they get written out.
2664 	 * Buffers need to be attached to the page at this point, otherwise
2665 	 * the handling of potential IO errors during writeout would be hard
2666 	 * (could try doing synchronous writeout, but what if that fails too?)
2667 	 */
2668 	attach_nobh_buffers(page, head);
2669 	page_zero_new_buffers(page, from, to);
2670 
2671 out_release:
2672 	unlock_page(page);
2673 	page_cache_release(page);
2674 	*pagep = NULL;
2675 
2676 	if (pos + len > inode->i_size)
2677 		vmtruncate(inode, inode->i_size);
2678 
2679 	return ret;
2680 }
2681 EXPORT_SYMBOL(nobh_write_begin);
2682 
2683 int nobh_write_end(struct file *file, struct address_space *mapping,
2684 			loff_t pos, unsigned len, unsigned copied,
2685 			struct page *page, void *fsdata)
2686 {
2687 	struct inode *inode = page->mapping->host;
2688 	struct buffer_head *head = fsdata;
2689 	struct buffer_head *bh;
2690 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2691 
2692 	if (unlikely(copied < len) && head)
2693 		attach_nobh_buffers(page, head);
2694 	if (page_has_buffers(page))
2695 		return generic_write_end(file, mapping, pos, len,
2696 					copied, page, fsdata);
2697 
2698 	SetPageUptodate(page);
2699 	set_page_dirty(page);
2700 	if (pos+copied > inode->i_size) {
2701 		i_size_write(inode, pos+copied);
2702 		mark_inode_dirty(inode);
2703 	}
2704 
2705 	unlock_page(page);
2706 	page_cache_release(page);
2707 
2708 	while (head) {
2709 		bh = head;
2710 		head = head->b_this_page;
2711 		free_buffer_head(bh);
2712 	}
2713 
2714 	return copied;
2715 }
2716 EXPORT_SYMBOL(nobh_write_end);
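
/*
 * Illustrative sketch (not part of buffer.c): using the nobh variants for a
 * filesystem that wants to avoid attaching buffer_heads where possible.
 * examplefs_get_block and the examplefs_nobh_* names are hypothetical.
 */
static int examplefs_get_block(struct inode *, sector_t,
				struct buffer_head *, int);	/* hypothetical */

static int examplefs_nobh_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				examplefs_get_block);
}

static const struct address_space_operations examplefs_nobh_aops = {
	.write_begin	= examplefs_nobh_write_begin,
	.write_end	= nobh_write_end,
	/* readpage, writepage, etc. omitted from this sketch */
};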
2717 
2718 /*
2719  * nobh_writepage() - based on block_write_full_page() except
2720  * that it tries to operate without attaching bufferheads to
2721  * the page.
2722  */
2723 int nobh_writepage(struct page *page, get_block_t *get_block,
2724 			struct writeback_control *wbc)
2725 {
2726 	struct inode * const inode = page->mapping->host;
2727 	loff_t i_size = i_size_read(inode);
2728 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2729 	unsigned offset;
2730 	int ret;
2731 
2732 	/* Is the page fully inside i_size? */
2733 	if (page->index < end_index)
2734 		goto out;
2735 
2736 	/* Is the page fully outside i_size? (truncate in progress) */
2737 	offset = i_size & (PAGE_CACHE_SIZE-1);
2738 	if (page->index >= end_index+1 || !offset) {
2739 		/*
2740 		 * The page may have dirty, unmapped buffers.  For example,
2741 		 * they may have been added in ext3_writepage().  Make them
2742 		 * freeable here, so the page does not leak.
2743 		 */
2744 #if 0
2745 		/* Not really sure about this  - do we need this ? */
2746 		if (page->mapping->a_ops->invalidatepage)
2747 			page->mapping->a_ops->invalidatepage(page, offset);
2748 #endif
2749 		unlock_page(page);
2750 		return 0; /* don't care */
2751 	}
2752 
2753 	/*
2754 	 * The page straddles i_size.  It must be zeroed out on each and every
2755 	 * writepage invocation because it may be mmapped.  "A file is mapped
2756 	 * in multiples of the page size.  For a file that is not a multiple of
2757 	 * the  page size, the remaining memory is zeroed when mapped, and
2758 	 * writes to that region are not written out to the file."
2759 	 */
2760 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2761 out:
2762 	ret = mpage_writepage(page, get_block, wbc);
2763 	if (ret == -EAGAIN)
2764 		ret = __block_write_full_page(inode, page, get_block, wbc);
2765 	return ret;
2766 }
2767 EXPORT_SYMBOL(nobh_writepage);
2768 
2769 int nobh_truncate_page(struct address_space *mapping,
2770 			loff_t from, get_block_t *get_block)
2771 {
2772 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2773 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2774 	unsigned blocksize;
2775 	sector_t iblock;
2776 	unsigned length, pos;
2777 	struct inode *inode = mapping->host;
2778 	struct page *page;
2779 	struct buffer_head map_bh;
2780 	int err;
2781 
2782 	blocksize = 1 << inode->i_blkbits;
2783 	length = offset & (blocksize - 1);
2784 
2785 	/* Block boundary? Nothing to do */
2786 	if (!length)
2787 		return 0;
2788 
2789 	length = blocksize - length;
2790 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2791 
2792 	page = grab_cache_page(mapping, index);
2793 	err = -ENOMEM;
2794 	if (!page)
2795 		goto out;
2796 
2797 	if (page_has_buffers(page)) {
2798 has_buffers:
2799 		unlock_page(page);
2800 		page_cache_release(page);
2801 		return block_truncate_page(mapping, from, get_block);
2802 	}
2803 
2804 	/* Find the buffer that contains "offset" */
2805 	pos = blocksize;
2806 	while (offset >= pos) {
2807 		iblock++;
2808 		pos += blocksize;
2809 	}
2810 
2811 	err = get_block(inode, iblock, &map_bh, 0);
2812 	if (err)
2813 		goto unlock;
2814 	/* unmapped? It's a hole - nothing to do */
2815 	if (!buffer_mapped(&map_bh))
2816 		goto unlock;
2817 
2818 	/* Ok, it's mapped. Make sure it's up-to-date */
2819 	if (!PageUptodate(page)) {
2820 		err = mapping->a_ops->readpage(NULL, page);
2821 		if (err) {
2822 			page_cache_release(page);
2823 			goto out;
2824 		}
2825 		lock_page(page);
2826 		if (!PageUptodate(page)) {
2827 			err = -EIO;
2828 			goto unlock;
2829 		}
2830 		if (page_has_buffers(page))
2831 			goto has_buffers;
2832 	}
2833 	zero_user(page, offset, length);
2834 	set_page_dirty(page);
2835 	err = 0;
2836 
2837 unlock:
2838 	unlock_page(page);
2839 	page_cache_release(page);
2840 out:
2841 	return err;
2842 }
2843 EXPORT_SYMBOL(nobh_truncate_page);
2844 
2845 int block_truncate_page(struct address_space *mapping,
2846 			loff_t from, get_block_t *get_block)
2847 {
2848 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2849 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2850 	unsigned blocksize;
2851 	sector_t iblock;
2852 	unsigned length, pos;
2853 	struct inode *inode = mapping->host;
2854 	struct page *page;
2855 	struct buffer_head *bh;
2856 	int err;
2857 
2858 	blocksize = 1 << inode->i_blkbits;
2859 	length = offset & (blocksize - 1);
2860 
2861 	/* Block boundary? Nothing to do */
2862 	if (!length)
2863 		return 0;
2864 
2865 	length = blocksize - length;
2866 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2867 
2868 	page = grab_cache_page(mapping, index);
2869 	err = -ENOMEM;
2870 	if (!page)
2871 		goto out;
2872 
2873 	if (!page_has_buffers(page))
2874 		create_empty_buffers(page, blocksize, 0);
2875 
2876 	/* Find the buffer that contains "offset" */
2877 	bh = page_buffers(page);
2878 	pos = blocksize;
2879 	while (offset >= pos) {
2880 		bh = bh->b_this_page;
2881 		iblock++;
2882 		pos += blocksize;
2883 	}
2884 
2885 	err = 0;
2886 	if (!buffer_mapped(bh)) {
2887 		WARN_ON(bh->b_size != blocksize);
2888 		err = get_block(inode, iblock, bh, 0);
2889 		if (err)
2890 			goto unlock;
2891 		/* unmapped? It's a hole - nothing to do */
2892 		if (!buffer_mapped(bh))
2893 			goto unlock;
2894 	}
2895 
2896 	/* Ok, it's mapped. Make sure it's up-to-date */
2897 	if (PageUptodate(page))
2898 		set_buffer_uptodate(bh);
2899 
2900 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2901 		err = -EIO;
2902 		ll_rw_block(READ, 1, &bh);
2903 		wait_on_buffer(bh);
2904 		/* Uhhuh. Read error. Complain and punt. */
2905 		if (!buffer_uptodate(bh))
2906 			goto unlock;
2907 	}
2908 
2909 	zero_user(page, offset, length);
2910 	mark_buffer_dirty(bh);
2911 	err = 0;
2912 
2913 unlock:
2914 	unlock_page(page);
2915 	page_cache_release(page);
2916 out:
2917 	return err;
2918 }
2919 
2920 /*
2921  * The generic ->writepage function for buffer-backed address_spaces
2922  */
2923 int block_write_full_page(struct page *page, get_block_t *get_block,
2924 			struct writeback_control *wbc)
2925 {
2926 	struct inode * const inode = page->mapping->host;
2927 	loff_t i_size = i_size_read(inode);
2928 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2929 	unsigned offset;
2930 
2931 	/* Is the page fully inside i_size? */
2932 	if (page->index < end_index)
2933 		return __block_write_full_page(inode, page, get_block, wbc);
2934 
2935 	/* Is the page fully outside i_size? (truncate in progress) */
2936 	offset = i_size & (PAGE_CACHE_SIZE-1);
2937 	if (page->index >= end_index+1 || !offset) {
2938 		/*
2939 		 * The page may have dirty, unmapped buffers.  For example,
2940 		 * they may have been added in ext3_writepage().  Make them
2941 		 * freeable here, so the page does not leak.
2942 		 */
2943 		do_invalidatepage(page, 0);
2944 		unlock_page(page);
2945 		return 0; /* don't care */
2946 	}
2947 
2948 	/*
2949 	 * The page straddles i_size.  It must be zeroed out on each and every
2950 	 * writepage invocation because it may be mmapped.  "A file is mapped
2951 	 * in multiples of the page size.  For a file that is not a multiple of
2952 	 * the  page size, the remaining memory is zeroed when mapped, and
2953 	 * writes to that region are not written out to the file."
2954 	 */
2955 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2956 	return __block_write_full_page(inode, page, get_block, wbc);
2957 }
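
/*
 * Illustrative sketch (not part of buffer.c): the usual ->writepage for a
 * buffer-backed filesystem is a thin wrapper around block_write_full_page().
 * examplefs_get_block and examplefs_writepage are hypothetical names.
 */
static int examplefs_get_block(struct inode *, sector_t,
				struct buffer_head *, int);	/* hypothetical */

static int examplefs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, examplefs_get_block, wbc);
}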
2958 
2959 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2960 			    get_block_t *get_block)
2961 {
2962 	struct buffer_head tmp;
2963 	struct inode *inode = mapping->host;
2964 	tmp.b_state = 0;
2965 	tmp.b_blocknr = 0;
2966 	tmp.b_size = 1 << inode->i_blkbits;
2967 	get_block(inode, block, &tmp, 0);
2968 	return tmp.b_blocknr;
2969 }
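
/*
 * Illustrative sketch (not part of buffer.c): exposing the block mapping
 * through ->bmap (used by the FIBMAP ioctl and swap files) by delegating to
 * generic_block_bmap().  examplefs_get_block is hypothetical.
 */
static int examplefs_get_block(struct inode *, sector_t,
				struct buffer_head *, int);	/* hypothetical */

static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, examplefs_get_block);
}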
2970 
2971 static void end_bio_bh_io_sync(struct bio *bio, int err)
2972 {
2973 	struct buffer_head *bh = bio->bi_private;
2974 
2975 	if (err == -EOPNOTSUPP) {
2976 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2977 		set_bit(BH_Eopnotsupp, &bh->b_state);
2978 	}
2979 
2980 	if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2981 		set_bit(BH_Quiet, &bh->b_state);
2982 
2983 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2984 	bio_put(bio);
2985 }
2986 
2987 int submit_bh(int rw, struct buffer_head * bh)
2988 {
2989 	struct bio *bio;
2990 	int ret = 0;
2991 
2992 	BUG_ON(!buffer_locked(bh));
2993 	BUG_ON(!buffer_mapped(bh));
2994 	BUG_ON(!bh->b_end_io);
2995 
2996 	/*
2997 	 * Mask in barrier bit for a write (could be either a WRITE or a
2998 	 * WRITE_SYNC)
2999 	 */
3000 	if (buffer_ordered(bh) && (rw & WRITE))
3001 		rw |= WRITE_BARRIER;
3002 
3003 	/*
3004 	 * Only clear out a write error when rewriting
3005 	 */
3006 	if (test_set_buffer_req(bh) && (rw & WRITE))
3007 		clear_buffer_write_io_error(bh);
3008 
3009 	/*
3010 	 * from here on down, it's all bio -- do the initial mapping,
3011 	 * submit_bio -> generic_make_request may further map this bio around
3012 	 */
3013 	bio = bio_alloc(GFP_NOIO, 1);
3014 
3015 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3016 	bio->bi_bdev = bh->b_bdev;
3017 	bio->bi_io_vec[0].bv_page = bh->b_page;
3018 	bio->bi_io_vec[0].bv_len = bh->b_size;
3019 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
3020 
3021 	bio->bi_vcnt = 1;
3022 	bio->bi_idx = 0;
3023 	bio->bi_size = bh->b_size;
3024 
3025 	bio->bi_end_io = end_bio_bh_io_sync;
3026 	bio->bi_private = bh;
3027 
3028 	bio_get(bio);
3029 	submit_bio(rw, bio);
3030 
3031 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
3032 		ret = -EOPNOTSUPP;
3033 
3034 	bio_put(bio);
3035 	return ret;
3036 }
3037 
3038 /**
3039  * ll_rw_block: low-level access to block devices (DEPRECATED)
3040  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
3041  * @nr: number of &struct buffer_heads in the array
3042  * @bhs: array of pointers to &struct buffer_head
3043  *
3044  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3045  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
3046  * %SWRITE is like %WRITE, except that we make sure the *current* data in the
3047  * buffers is sent to disk. The fourth %READA option is described in the documentation
3048  * for generic_make_request() which ll_rw_block() calls.
3049  *
3050  * This function drops any buffer that it cannot get a lock on (with the
3051  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3052  * clean when doing a write request, and any buffer that appears to be
3053  * up-to-date when doing a read request.  Further, it marks as clean buffers that
3054  * are processed for writing (the buffer cache won't assume that they are
3055  * actually clean until the buffer gets unlocked).
3056  *
3057  * ll_rw_block sets b_end_io to simple completion handler that marks
3058  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3059  * any waiters.
3060  *
3061  * All of the buffers must be for the same device, and must also be a
3062  * multiple of the current approved size for the device.
3063  */
3064 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3065 {
3066 	int i;
3067 
3068 	for (i = 0; i < nr; i++) {
3069 		struct buffer_head *bh = bhs[i];
3070 
3071 		if (rw == SWRITE || rw == SWRITE_SYNC)
3072 			lock_buffer(bh);
3073 		else if (!trylock_buffer(bh))
3074 			continue;
3075 
3076 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
3077 			if (test_clear_buffer_dirty(bh)) {
3078 				bh->b_end_io = end_buffer_write_sync;
3079 				get_bh(bh);
3080 				if (rw == SWRITE_SYNC)
3081 					submit_bh(WRITE_SYNC, bh);
3082 				else
3083 					submit_bh(WRITE, bh);
3084 				continue;
3085 			}
3086 		} else {
3087 			if (!buffer_uptodate(bh)) {
3088 				bh->b_end_io = end_buffer_read_sync;
3089 				get_bh(bh);
3090 				submit_bh(rw, bh);
3091 				continue;
3092 			}
3093 		}
3094 		unlock_buffer(bh);
3095 	}
3096 }
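
/*
 * Illustrative sketch (not part of buffer.c): reading a run of metadata
 * blocks with ll_rw_block() and waiting only for the one we actually need;
 * the rest act as readahead.  sb_getblk(), wait_on_buffer() and brelse()
 * are the standard helpers from <linux/buffer_head.h>; example_read_blocks()
 * is a hypothetical helper.
 */
static struct buffer_head *example_read_blocks(struct super_block *sb,
					       sector_t first, int nr)
{
	struct buffer_head *bhs[8];
	int i, n = min(nr, 8);

	for (i = 0; i < n; i++)
		bhs[i] = sb_getblk(sb, first + i);

	/* start reads; already-uptodate or busy buffers are skipped */
	ll_rw_block(READ, n, bhs);

	/* only the first block is needed right now */
	wait_on_buffer(bhs[0]);
	for (i = 1; i < n; i++)
		brelse(bhs[i]);

	if (!buffer_uptodate(bhs[0])) {
		brelse(bhs[0]);
		return NULL;
	}
	return bhs[0];
}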
3097 
3098 /*
3099  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3100  * and then start new I/O and then wait upon it.  The caller must have a ref on
3101  * the buffer_head.
3102  */
3103 int sync_dirty_buffer(struct buffer_head *bh)
3104 {
3105 	int ret = 0;
3106 
3107 	WARN_ON(atomic_read(&bh->b_count) < 1);
3108 	lock_buffer(bh);
3109 	if (test_clear_buffer_dirty(bh)) {
3110 		get_bh(bh);
3111 		bh->b_end_io = end_buffer_write_sync;
3112 		ret = submit_bh(WRITE, bh);
3113 		wait_on_buffer(bh);
3114 		if (buffer_eopnotsupp(bh)) {
3115 			clear_buffer_eopnotsupp(bh);
3116 			ret = -EOPNOTSUPP;
3117 		}
3118 		if (!ret && !buffer_uptodate(bh))
3119 			ret = -EIO;
3120 	} else {
3121 		unlock_buffer(bh);
3122 	}
3123 	return ret;
3124 }
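
/*
 * Illustrative sketch (not part of buffer.c): updating one metadata block
 * and forcing it to stable storage with sync_dirty_buffer().  sb_bread() is
 * the standard synchronous read helper; example_update_block() and the
 * on-disk layout it implies are hypothetical.
 */
static int example_update_block(struct super_block *sb, sector_t block,
				const void *data, size_t len)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, block);	/* read (or find) the block */
	if (!bh)
		return -EIO;

	memcpy(bh->b_data, data, len);	/* modify it in the buffer cache */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* wait until it has reached the disk */
	brelse(bh);
	return err;
}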
3125 
3126 /*
3127  * try_to_free_buffers() checks if all the buffers on this particular page
3128  * are unused, and releases them if so.
3129  *
3130  * Exclusion against try_to_free_buffers may be obtained by either
3131  * locking the page or by holding its mapping's private_lock.
3132  *
3133  * If the page is dirty but all the buffers are clean then we need to
3134  * be sure to mark the page clean as well.  This is because the page
3135  * may be against a block device, and a later reattachment of buffers
3136  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3137  * filesystem data on the same device.
3138  *
3139  * The same applies to regular filesystem pages: if all the buffers are
3140  * clean then we set the page clean and proceed.  To do that, we require
3141  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3142  * private_lock.
3143  *
3144  * try_to_free_buffers() is non-blocking.
3145  */
3146 static inline int buffer_busy(struct buffer_head *bh)
3147 {
3148 	return atomic_read(&bh->b_count) |
3149 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3150 }
3151 
3152 static int
3153 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3154 {
3155 	struct buffer_head *head = page_buffers(page);
3156 	struct buffer_head *bh;
3157 
3158 	bh = head;
3159 	do {
3160 		if (buffer_write_io_error(bh) && page->mapping)
3161 			set_bit(AS_EIO, &page->mapping->flags);
3162 		if (buffer_busy(bh))
3163 			goto failed;
3164 		bh = bh->b_this_page;
3165 	} while (bh != head);
3166 
3167 	do {
3168 		struct buffer_head *next = bh->b_this_page;
3169 
3170 		if (bh->b_assoc_map)
3171 			__remove_assoc_queue(bh);
3172 		bh = next;
3173 	} while (bh != head);
3174 	*buffers_to_free = head;
3175 	__clear_page_buffers(page);
3176 	return 1;
3177 failed:
3178 	return 0;
3179 }
3180 
3181 int try_to_free_buffers(struct page *page)
3182 {
3183 	struct address_space * const mapping = page->mapping;
3184 	struct buffer_head *buffers_to_free = NULL;
3185 	int ret = 0;
3186 
3187 	BUG_ON(!PageLocked(page));
3188 	if (PageWriteback(page))
3189 		return 0;
3190 
3191 	if (mapping == NULL) {		/* can this still happen? */
3192 		ret = drop_buffers(page, &buffers_to_free);
3193 		goto out;
3194 	}
3195 
3196 	spin_lock(&mapping->private_lock);
3197 	ret = drop_buffers(page, &buffers_to_free);
3198 
3199 	/*
3200 	 * If the filesystem writes its buffers by hand (eg ext3)
3201 	 * then we can have clean buffers against a dirty page.  We
3202 	 * clean the page here; otherwise the VM will never notice
3203 	 * that the filesystem did any IO at all.
3204 	 *
3205 	 * Also, during truncate, discard_buffer will have marked all
3206 	 * the page's buffers clean.  We discover that here and clean
3207 	 * the page also.
3208 	 *
3209 	 * private_lock must be held over this entire operation in order
3210 	 * to synchronise against __set_page_dirty_buffers and prevent the
3211 	 * dirty bit from being lost.
3212 	 */
3213 	if (ret)
3214 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3215 	spin_unlock(&mapping->private_lock);
3216 out:
3217 	if (buffers_to_free) {
3218 		struct buffer_head *bh = buffers_to_free;
3219 
3220 		do {
3221 			struct buffer_head *next = bh->b_this_page;
3222 			free_buffer_head(bh);
3223 			bh = next;
3224 		} while (bh != buffers_to_free);
3225 	}
3226 	return ret;
3227 }
3228 EXPORT_SYMBOL(try_to_free_buffers);
3229 
3230 void block_sync_page(struct page *page)
3231 {
3232 	struct address_space *mapping;
3233 
3234 	smp_mb();
3235 	mapping = page_mapping(page);
3236 	if (mapping)
3237 		blk_run_backing_dev(mapping->backing_dev_info, page);
3238 }
3239 
3240 /*
3241  * There are no bdflush tunables left.  But distributions are
3242  * still running obsolete flush daemons, so we terminate them here.
3243  *
3244  * Use of bdflush() is deprecated and will be removed in a future kernel.
3245  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3246  */
3247 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3248 {
3249 	static int msg_count;
3250 
3251 	if (!capable(CAP_SYS_ADMIN))
3252 		return -EPERM;
3253 
3254 	if (msg_count < 5) {
3255 		msg_count++;
3256 		printk(KERN_INFO
3257 			"warning: process `%s' used the obsolete bdflush"
3258 			" system call\n", current->comm);
3259 		printk(KERN_INFO "Fix your initscripts?\n");
3260 	}
3261 
3262 	if (func == 1)
3263 		do_exit(0);
3264 	return 0;
3265 }
3266 
3267 /*
3268  * Buffer-head allocation
3269  */
3270 static struct kmem_cache *bh_cachep;
3271 
3272 /*
3273  * Once the number of bh's in the machine exceeds this level, we start
3274  * stripping them in writeback.
3275  */
3276 static int max_buffer_heads;
3277 
3278 int buffer_heads_over_limit;
3279 
3280 struct bh_accounting {
3281 	int nr;			/* Number of live bh's */
3282 	int ratelimit;		/* Limit cacheline bouncing */
3283 };
3284 
3285 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3286 
3287 static void recalc_bh_state(void)
3288 {
3289 	int i;
3290 	int tot = 0;
3291 
3292 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3293 		return;
3294 	__get_cpu_var(bh_accounting).ratelimit = 0;
3295 	for_each_online_cpu(i)
3296 		tot += per_cpu(bh_accounting, i).nr;
3297 	buffer_heads_over_limit = (tot > max_buffer_heads);
3298 }
3299 
3300 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3301 {
3302 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3303 	if (ret) {
3304 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3305 		get_cpu_var(bh_accounting).nr++;
3306 		recalc_bh_state();
3307 		put_cpu_var(bh_accounting);
3308 	}
3309 	return ret;
3310 }
3311 EXPORT_SYMBOL(alloc_buffer_head);
3312 
3313 void free_buffer_head(struct buffer_head *bh)
3314 {
3315 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3316 	kmem_cache_free(bh_cachep, bh);
3317 	get_cpu_var(bh_accounting).nr--;
3318 	recalc_bh_state();
3319 	put_cpu_var(bh_accounting);
3320 }
3321 EXPORT_SYMBOL(free_buffer_head);
3322 
3323 static void buffer_exit_cpu(int cpu)
3324 {
3325 	int i;
3326 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3327 
3328 	for (i = 0; i < BH_LRU_SIZE; i++) {
3329 		brelse(b->bhs[i]);
3330 		b->bhs[i] = NULL;
3331 	}
3332 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3333 	per_cpu(bh_accounting, cpu).nr = 0;
3334 	put_cpu_var(bh_accounting);
3335 }
3336 
3337 static int buffer_cpu_notify(struct notifier_block *self,
3338 			      unsigned long action, void *hcpu)
3339 {
3340 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3341 		buffer_exit_cpu((unsigned long)hcpu);
3342 	return NOTIFY_OK;
3343 }
3344 
3345 /**
3346  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3347  * @bh: struct buffer_head
3348  *
3349  * Return true if the buffer is up-to-date; otherwise return false
3350  * with the buffer locked.
3351  */
3352 int bh_uptodate_or_lock(struct buffer_head *bh)
3353 {
3354 	if (!buffer_uptodate(bh)) {
3355 		lock_buffer(bh);
3356 		if (!buffer_uptodate(bh))
3357 			return 0;
3358 		unlock_buffer(bh);
3359 	}
3360 	return 1;
3361 }
3362 EXPORT_SYMBOL(bh_uptodate_or_lock);
3363 
3364 /**
3365  * bh_submit_read - Submit a locked buffer for reading
3366  * @bh: struct buffer_head
3367  *
3368  * Returns zero on success and -EIO on error.
3369  */
3370 int bh_submit_read(struct buffer_head *bh)
3371 {
3372 	BUG_ON(!buffer_locked(bh));
3373 
3374 	if (buffer_uptodate(bh)) {
3375 		unlock_buffer(bh);
3376 		return 0;
3377 	}
3378 
3379 	get_bh(bh);
3380 	bh->b_end_io = end_buffer_read_sync;
3381 	submit_bh(READ, bh);
3382 	wait_on_buffer(bh);
3383 	if (buffer_uptodate(bh))
3384 		return 0;
3385 	return -EIO;
3386 }
3387 EXPORT_SYMBOL(bh_submit_read);
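
/*
 * Illustrative sketch (not part of buffer.c): the intended pairing of
 * bh_uptodate_or_lock() and bh_submit_read() - skip the read entirely when
 * the buffer is already uptodate, otherwise read it under the buffer lock.
 * example_read_bh() is a hypothetical helper.
 */
static int example_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, buffer left unlocked */

	/* bh is now locked and not uptodate: issue the read and wait */
	return bh_submit_read(bh);
}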
3388 
3389 static void
3390 init_buffer_head(void *data)
3391 {
3392 	struct buffer_head *bh = data;
3393 
3394 	memset(bh, 0, sizeof(*bh));
3395 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3396 }
3397 
3398 void __init buffer_init(void)
3399 {
3400 	int nrpages;
3401 
3402 	bh_cachep = kmem_cache_create("buffer_head",
3403 			sizeof(struct buffer_head), 0,
3404 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3405 				SLAB_MEM_SPREAD),
3406 				init_buffer_head);
3407 
3408 	/*
3409 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3410 	 */
3411 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3412 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3413 	hotcpu_notifier(buffer_cpu_notify, 0);
3414 }
3415 
3416 EXPORT_SYMBOL(__bforget);
3417 EXPORT_SYMBOL(__brelse);
3418 EXPORT_SYMBOL(__wait_on_buffer);
3419 EXPORT_SYMBOL(block_commit_write);
3420 EXPORT_SYMBOL(block_prepare_write);
3421 EXPORT_SYMBOL(block_page_mkwrite);
3422 EXPORT_SYMBOL(block_read_full_page);
3423 EXPORT_SYMBOL(block_sync_page);
3424 EXPORT_SYMBOL(block_truncate_page);
3425 EXPORT_SYMBOL(block_write_full_page);
3426 EXPORT_SYMBOL(cont_write_begin);
3427 EXPORT_SYMBOL(end_buffer_read_sync);
3428 EXPORT_SYMBOL(end_buffer_write_sync);
3429 EXPORT_SYMBOL(file_fsync);
3430 EXPORT_SYMBOL(fsync_bdev);
3431 EXPORT_SYMBOL(generic_block_bmap);
3432 EXPORT_SYMBOL(generic_cont_expand_simple);
3433 EXPORT_SYMBOL(init_buffer);
3434 EXPORT_SYMBOL(invalidate_bdev);
3435 EXPORT_SYMBOL(ll_rw_block);
3436 EXPORT_SYMBOL(mark_buffer_dirty);
3437 EXPORT_SYMBOL(submit_bh);
3438 EXPORT_SYMBOL(sync_dirty_buffer);
3439 EXPORT_SYMBOL(unlock_buffer);
3440