xref: /openbmc/linux/fs/buffer.c (revision 18c993629a5a5938a032f04a698d15122550593d)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 
56 static int sync_buffer(void *word)
57 {
58 	struct block_device *bd;
59 	struct buffer_head *bh
60 		= container_of(word, struct buffer_head, b_state);
61 
62 	smp_mb();
63 	bd = bh->b_bdev;
64 	if (bd)
65 		blk_run_address_space(bd->bd_inode->i_mapping);
66 	io_schedule();
67 	return 0;
68 }
69 
70 void __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
77 void unlock_buffer(struct buffer_head *bh)
78 {
79 	smp_mb__before_clear_bit();
80 	clear_buffer_locked(bh);
81 	smp_mb__after_clear_bit();
82 	wake_up_bit(&bh->b_state, BH_Lock);
83 }
84 
85 /*
86  * Block until a buffer comes unlocked.  This doesn't stop it
87  * from becoming locked again - you have to lock it yourself
88  * if you want to preserve its state.
89  */
90 void __wait_on_buffer(struct buffer_head * bh)
91 {
92 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93 }
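
/*
 * A minimal usage sketch for the locking helpers above: wait_on_buffer()
 * only guarantees that the buffer *was* unlocked at some point, and another
 * task may lock it again immediately, so a caller that needs a stable view
 * of the buffer takes the lock itself:
 *
 *	lock_buffer(bh);
 *	... bring the contents up to date and/or modify bh->b_data ...
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 */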
94 
95 static void
96 __clear_page_buffers(struct page *page)
97 {
98 	ClearPagePrivate(page);
99 	set_page_private(page, 0);
100 	page_cache_release(page);
101 }
102 
103 static void buffer_io_error(struct buffer_head *bh)
104 {
105 	char b[BDEVNAME_SIZE];
106 
107 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 			bdevname(bh->b_bdev, b),
109 			(unsigned long long)bh->b_blocknr);
110 }
111 
112 /*
113  * End-of-IO handler helper function which does not touch the bh after
114  * unlocking it.
115  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116  * a race there is benign: unlock_buffer() only uses the bh's address for
117  * hashing after unlocking the buffer, so it doesn't actually touch the bh
118  * itself.
119  */
120 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
121 {
122 	if (uptodate) {
123 		set_buffer_uptodate(bh);
124 	} else {
125 		/* This happens, due to failed READA attempts. */
126 		clear_buffer_uptodate(bh);
127 	}
128 	unlock_buffer(bh);
129 }
130 
131 /*
132  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
133  * unlock the buffer. This is what ll_rw_block uses too.
134  */
135 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
136 {
137 	__end_buffer_read_notouch(bh, uptodate);
138 	put_bh(bh);
139 }
140 
141 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
142 {
143 	char b[BDEVNAME_SIZE];
144 
145 	if (uptodate) {
146 		set_buffer_uptodate(bh);
147 	} else {
148 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
149 			buffer_io_error(bh);
150 			printk(KERN_WARNING "lost page write due to "
151 					"I/O error on %s\n",
152 				       bdevname(bh->b_bdev, b));
153 		}
154 		set_buffer_write_io_error(bh);
155 		clear_buffer_uptodate(bh);
156 	}
157 	unlock_buffer(bh);
158 	put_bh(bh);
159 }
160 
161 /*
162  * Write out and wait upon all the dirty data associated with a block
163  * device via its mapping.  Does not take the superblock lock.
164  */
165 int sync_blockdev(struct block_device *bdev)
166 {
167 	int ret = 0;
168 
169 	if (bdev)
170 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
171 	return ret;
172 }
173 EXPORT_SYMBOL(sync_blockdev);
174 
175 /*
176  * Write out and wait upon all dirty data associated with this
177  * device.   Filesystem data as well as the underlying block
178  * device.  Takes the superblock lock.
179  */
180 int fsync_bdev(struct block_device *bdev)
181 {
182 	struct super_block *sb = get_super(bdev);
183 	if (sb) {
184 		int res = fsync_super(sb);
185 		drop_super(sb);
186 		return res;
187 	}
188 	return sync_blockdev(bdev);
189 }
190 
191 /**
192  * freeze_bdev  --  lock a filesystem and force it into a consistent state
193  * @bdev:	blockdevice to lock
194  *
195  * This takes the block device bd_mount_sem to make sure no new mounts
196  * happen on bdev until thaw_bdev() is called.
197  * If a superblock is found on this device, we take the s_umount semaphore
198  * on it to make sure nobody unmounts until the snapshot creation is done.
199  */
200 struct super_block *freeze_bdev(struct block_device *bdev)
201 {
202 	struct super_block *sb;
203 
204 	down(&bdev->bd_mount_sem);
205 	sb = get_super(bdev);
206 	if (sb && !(sb->s_flags & MS_RDONLY)) {
207 		sb->s_frozen = SB_FREEZE_WRITE;
208 		smp_wmb();
209 
210 		__fsync_super(sb);
211 
212 		sb->s_frozen = SB_FREEZE_TRANS;
213 		smp_wmb();
214 
215 		sync_blockdev(sb->s_bdev);
216 
217 		if (sb->s_op->write_super_lockfs)
218 			sb->s_op->write_super_lockfs(sb);
219 	}
220 
221 	sync_blockdev(bdev);
222 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
223 }
224 EXPORT_SYMBOL(freeze_bdev);
225 
226 /**
227  * thaw_bdev  -- unlock filesystem
228  * @bdev:	blockdevice to unlock
229  * @sb:		associated superblock
230  *
231  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
232  */
233 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
234 {
235 	if (sb) {
236 		BUG_ON(sb->s_bdev != bdev);
237 
238 		if (sb->s_op->unlockfs)
239 			sb->s_op->unlockfs(sb);
240 		sb->s_frozen = SB_UNFROZEN;
241 		smp_wmb();
242 		wake_up(&sb->s_wait_unfrozen);
243 		drop_super(sb);
244 	}
245 
246 	up(&bdev->bd_mount_sem);
247 }
248 EXPORT_SYMBOL(thaw_bdev);
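
/*
 * A minimal usage sketch for the pair above, as snapshot code (device-mapper,
 * for example) would use it; error handling is omitted:
 *
 *	struct super_block *sb;
 *
 *	sb = freeze_bdev(bdev);
 *	... take the block-level snapshot while new writes are held off ...
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() returns the superblock mounted on bdev (or NULL if there is
 * none); whatever it returned must be passed back to thaw_bdev().
 */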
249 
250 /*
251  * Various filesystems appear to want __find_get_block to be non-blocking.
252  * But it's the page lock which protects the buffers.  To get around this,
253  * we get exclusion from try_to_free_buffers with the blockdev mapping's
254  * private_lock.
255  *
256  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
257  * may be quite high.  This code could TryLock the page, and if that
258  * succeeds, there is no need to take private_lock. (But if
259  * private_lock is contended then so is mapping->tree_lock).
260  */
261 static struct buffer_head *
262 __find_get_block_slow(struct block_device *bdev, sector_t block)
263 {
264 	struct inode *bd_inode = bdev->bd_inode;
265 	struct address_space *bd_mapping = bd_inode->i_mapping;
266 	struct buffer_head *ret = NULL;
267 	pgoff_t index;
268 	struct buffer_head *bh;
269 	struct buffer_head *head;
270 	struct page *page;
271 	int all_mapped = 1;
272 
273 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
274 	page = find_get_page(bd_mapping, index);
275 	if (!page)
276 		goto out;
277 
278 	spin_lock(&bd_mapping->private_lock);
279 	if (!page_has_buffers(page))
280 		goto out_unlock;
281 	head = page_buffers(page);
282 	bh = head;
283 	do {
284 		if (bh->b_blocknr == block) {
285 			ret = bh;
286 			get_bh(bh);
287 			goto out_unlock;
288 		}
289 		if (!buffer_mapped(bh))
290 			all_mapped = 0;
291 		bh = bh->b_this_page;
292 	} while (bh != head);
293 
294 	/* we might be here because some of the buffers on this page are
295 	 * not mapped.  This is due to various races between
296 	 * file io on the block device and getblk.  It gets dealt with
297 	 * elsewhere, don't buffer_error if we had some unmapped buffers
298 	 */
299 	if (all_mapped) {
300 		printk("__find_get_block_slow() failed. "
301 			"block=%llu, b_blocknr=%llu\n",
302 			(unsigned long long)block,
303 			(unsigned long long)bh->b_blocknr);
304 		printk("b_state=0x%08lx, b_size=%zu\n",
305 			bh->b_state, bh->b_size);
306 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
307 	}
308 out_unlock:
309 	spin_unlock(&bd_mapping->private_lock);
310 	page_cache_release(page);
311 out:
312 	return ret;
313 }
314 
315 /* If invalidate_buffers() will trash dirty buffers, it means some kind
316    of fs corruption is going on. Trashing dirty data always implies losing
317    information that was supposed to be just stored on the physical layer
318    by the user.
319 
320    Thus invalidate_buffers in general usage is not allowed to trash
321    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
322    be preserved.  These buffers are simply skipped.
323 
324    We also skip buffers which are still in use.  For example this can
325    happen if a userspace program is reading the block device.
326 
327    NOTE: in the case where the user removes a removable-media disk while
328    there is still dirty data not synced to disk (due to a bug in the device
329    driver or to a user error), by not destroying the dirty buffers we could
330    also corrupt the next media inserted; thus a parameter is necessary to
331    handle this case in the safest way possible (trying not to corrupt the
332    newly inserted disk with data belonging to the old, now corrupted, disk).
333    Also, for the ramdisk the natural thing to do in order to release the
334    ramdisk memory is to destroy its dirty buffers.
335 
336    These are two special cases. Normal usage implies that the device driver
337    issues a sync on the device (without waiting for I/O completion) and
338    then an invalidate_buffers call that doesn't trash dirty buffers.
339 
340    For handling cache coherency with the blkdev pagecache the 'update' case
341    has been introduced. It is needed to re-read from disk any pinned
342    buffer. NOTE: re-reading from disk is destructive so we can do it only
343    when we assume nobody is changing the buffercache under our I/O and when
344    we think the disk contains more recent information than the buffercache.
345    The update == 1 pass marks the buffers we need to update, the update == 2
346    pass does the actual I/O. */
347 void invalidate_bdev(struct block_device *bdev)
348 {
349 	struct address_space *mapping = bdev->bd_inode->i_mapping;
350 
351 	if (mapping->nrpages == 0)
352 		return;
353 
354 	invalidate_bh_lrus();
355 	invalidate_mapping_pages(mapping, 0, -1);
356 }
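
/*
 * A minimal sketch of the "normal usage" described in the comment above, as
 * a block driver might perform it when it detects a media change (the
 * surrounding driver function is hypothetical):
 *
 *	static void example_media_changed(struct block_device *bdev)
 *	{
 *		sync_blockdev(bdev);	(write back what we can first)
 *		invalidate_bdev(bdev);	(then drop the clean cached pages)
 *	}
 */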
357 
358 /*
359  * Kick pdflush then try to free up some ZONE_NORMAL memory.
360  */
361 static void free_more_memory(void)
362 {
363 	struct zone *zone;
364 	int nid;
365 
366 	wakeup_pdflush(1024);
367 	yield();
368 
369 	for_each_online_node(nid) {
370 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
371 						gfp_zone(GFP_NOFS), NULL,
372 						&zone);
373 		if (zone)
374 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
375 						GFP_NOFS);
376 	}
377 }
378 
379 /*
380  * I/O completion handler for block_read_full_page() - pages
381  * which come unlocked at the end of I/O.
382  */
383 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
384 {
385 	unsigned long flags;
386 	struct buffer_head *first;
387 	struct buffer_head *tmp;
388 	struct page *page;
389 	int page_uptodate = 1;
390 
391 	BUG_ON(!buffer_async_read(bh));
392 
393 	page = bh->b_page;
394 	if (uptodate) {
395 		set_buffer_uptodate(bh);
396 	} else {
397 		clear_buffer_uptodate(bh);
398 		if (printk_ratelimit())
399 			buffer_io_error(bh);
400 		SetPageError(page);
401 	}
402 
403 	/*
404 	 * Be _very_ careful from here on. Bad things can happen if
405 	 * two buffer heads end IO at almost the same time and both
406 	 * decide that the page is now completely done.
407 	 */
408 	first = page_buffers(page);
409 	local_irq_save(flags);
410 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
411 	clear_buffer_async_read(bh);
412 	unlock_buffer(bh);
413 	tmp = bh;
414 	do {
415 		if (!buffer_uptodate(tmp))
416 			page_uptodate = 0;
417 		if (buffer_async_read(tmp)) {
418 			BUG_ON(!buffer_locked(tmp));
419 			goto still_busy;
420 		}
421 		tmp = tmp->b_this_page;
422 	} while (tmp != bh);
423 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
424 	local_irq_restore(flags);
425 
426 	/*
427 	 * If none of the buffers had errors and they are all
428 	 * uptodate then we can set the page uptodate.
429 	 */
430 	if (page_uptodate && !PageError(page))
431 		SetPageUptodate(page);
432 	unlock_page(page);
433 	return;
434 
435 still_busy:
436 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
437 	local_irq_restore(flags);
438 	return;
439 }
440 
441 /*
442  * Completion handler for block_write_full_page() - pages which are unlocked
443  * during I/O, and which have PageWriteback cleared upon I/O completion.
444  */
445 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
446 {
447 	char b[BDEVNAME_SIZE];
448 	unsigned long flags;
449 	struct buffer_head *first;
450 	struct buffer_head *tmp;
451 	struct page *page;
452 
453 	BUG_ON(!buffer_async_write(bh));
454 
455 	page = bh->b_page;
456 	if (uptodate) {
457 		set_buffer_uptodate(bh);
458 	} else {
459 		if (printk_ratelimit()) {
460 			buffer_io_error(bh);
461 			printk(KERN_WARNING "lost page write due to "
462 					"I/O error on %s\n",
463 			       bdevname(bh->b_bdev, b));
464 		}
465 		set_bit(AS_EIO, &page->mapping->flags);
466 		set_buffer_write_io_error(bh);
467 		clear_buffer_uptodate(bh);
468 		SetPageError(page);
469 	}
470 
471 	first = page_buffers(page);
472 	local_irq_save(flags);
473 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
474 
475 	clear_buffer_async_write(bh);
476 	unlock_buffer(bh);
477 	tmp = bh->b_this_page;
478 	while (tmp != bh) {
479 		if (buffer_async_write(tmp)) {
480 			BUG_ON(!buffer_locked(tmp));
481 			goto still_busy;
482 		}
483 		tmp = tmp->b_this_page;
484 	}
485 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
486 	local_irq_restore(flags);
487 	end_page_writeback(page);
488 	return;
489 
490 still_busy:
491 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
492 	local_irq_restore(flags);
493 	return;
494 }
495 
496 /*
497  * If a page's buffers are under async read-in (end_buffer_async_read
498  * completion) then there is a possibility that another thread of
499  * control could lock one of the buffers after it has completed
500  * but while some of the other buffers have not completed.  This
501  * locked buffer would confuse end_buffer_async_read() into not unlocking
502  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
503  * that this buffer is not under async I/O.
504  *
505  * The page comes unlocked when it has no locked buffer_async buffers
506  * left.
507  *
508  * PageLocked prevents anyone from starting new async I/O reads against
509  * any of the buffers.
510  *
511  * PageWriteback is used to prevent simultaneous writeout of the same
512  * page.
513  *
514  * PageLocked prevents anyone from starting writeback of a page which is
515  * under read I/O (PageWriteback is only ever set against a locked page).
516  */
517 static void mark_buffer_async_read(struct buffer_head *bh)
518 {
519 	bh->b_end_io = end_buffer_async_read;
520 	set_buffer_async_read(bh);
521 }
522 
523 void mark_buffer_async_write(struct buffer_head *bh)
524 {
525 	bh->b_end_io = end_buffer_async_write;
526 	set_buffer_async_write(bh);
527 }
528 EXPORT_SYMBOL(mark_buffer_async_write);
529 
530 
531 /*
532  * fs/buffer.c contains helper functions for buffer-backed address space's
533  * fsync functions.  A common requirement for buffer-based filesystems is
534  * that certain data from the backing blockdev needs to be written out for
535  * a successful fsync().  For example, ext2 indirect blocks need to be
536  * written back and waited upon before fsync() returns.
537  *
538  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
539  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
540  * management of a list of dependent buffers at ->i_mapping->private_list.
541  *
542  * Locking is a little subtle: try_to_free_buffers() will remove buffers
543  * from their controlling inode's queue when they are being freed.  But
544  * try_to_free_buffers() will be operating against the *blockdev* mapping
545  * at the time, not against the S_ISREG file which depends on those buffers.
546  * So the locking for private_list is via the private_lock in the address_space
547  * which backs the buffers.  Which is different from the address_space
548  * against which the buffers are listed.  So for a particular address_space,
549  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
550  * mapping->private_list will always be protected by the backing blockdev's
551  * ->private_lock.
552  *
553  * Which introduces a requirement: all buffers on an address_space's
554  * ->private_list must be from the same address_space: the blockdev's.
555  *
556  * address_spaces which do not place buffers at ->private_list via these
557  * utility functions are free to use private_lock and private_list for
558  * whatever they want.  The only requirement is that list_empty(private_list)
559  * be true at clear_inode() time.
560  *
561  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
562  * filesystems should do that.  invalidate_inode_buffers() should just go
563  * BUG_ON(!list_empty).
564  *
565  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
566  * take an address_space, not an inode.  And it should be called
567  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
568  * queued up.
569  *
570  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
571  * list if it is already on a list.  Because if the buffer is on a list,
572  * it *must* already be on the right one.  If not, the filesystem is being
573  * silly.  This will save a ton of locking.  But first we have to ensure
574  * that buffers are taken *off* the old inode's list when they are freed
575  * (presumably in truncate).  That requires careful auditing of all
576  * filesystems (do it inside bforget()).  It could also be done by bringing
577  * b_inode back.
578  */
579 
580 /*
581  * The buffer's backing address_space's private_lock must be held
582  */
583 static inline void __remove_assoc_queue(struct buffer_head *bh)
584 {
585 	list_del_init(&bh->b_assoc_buffers);
586 	WARN_ON(!bh->b_assoc_map);
587 	if (buffer_write_io_error(bh))
588 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
589 	bh->b_assoc_map = NULL;
590 }
591 
592 int inode_has_buffers(struct inode *inode)
593 {
594 	return !list_empty(&inode->i_data.private_list);
595 }
596 
597 /*
598  * osync is designed to support O_SYNC io.  It waits synchronously for
599  * all already-submitted IO to complete, but does not queue any new
600  * writes to the disk.
601  *
602  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
603  * you dirty the buffers, and then use osync_inode_buffers to wait for
604  * completion.  Any other dirty buffers which are not yet queued for
605  * write will not be flushed to disk by the osync.
606  */
607 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
608 {
609 	struct buffer_head *bh;
610 	struct list_head *p;
611 	int err = 0;
612 
613 	spin_lock(lock);
614 repeat:
615 	list_for_each_prev(p, list) {
616 		bh = BH_ENTRY(p);
617 		if (buffer_locked(bh)) {
618 			get_bh(bh);
619 			spin_unlock(lock);
620 			wait_on_buffer(bh);
621 			if (!buffer_uptodate(bh))
622 				err = -EIO;
623 			brelse(bh);
624 			spin_lock(lock);
625 			goto repeat;
626 		}
627 	}
628 	spin_unlock(lock);
629 	return err;
630 }
631 
632 /**
633  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
634  * @mapping: the mapping which wants those buffers written
635  *
636  * Starts I/O against the buffers at mapping->private_list, and waits upon
637  * that I/O.
638  *
639  * Basically, this is a convenience function for fsync().
640  * @mapping is a file or directory which needs those buffers to be written for
641  * a successful fsync().
642  */
643 int sync_mapping_buffers(struct address_space *mapping)
644 {
645 	struct address_space *buffer_mapping = mapping->assoc_mapping;
646 
647 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
648 		return 0;
649 
650 	return fsync_buffers_list(&buffer_mapping->private_lock,
651 					&mapping->private_list);
652 }
653 EXPORT_SYMBOL(sync_mapping_buffers);
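
/*
 * A minimal sketch of a filesystem ->fsync() built on this helper (loosely
 * modelled on simple buffer-backed filesystems of this era; the function
 * name is illustrative):
 *
 *	int example_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err;
 *
 *		err = sync_mapping_buffers(inode->i_mapping);
 *		... write out the inode itself if it is dirty ...
 *		return err;
 *	}
 */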
654 
655 /*
656  * Called when we've recently written block `bblock', and it is known that
657  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
658  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
659  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
660  */
661 void write_boundary_block(struct block_device *bdev,
662 			sector_t bblock, unsigned blocksize)
663 {
664 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
665 	if (bh) {
666 		if (buffer_dirty(bh))
667 			ll_rw_block(WRITE, 1, &bh);
668 		put_bh(bh);
669 	}
670 }
671 
672 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
673 {
674 	struct address_space *mapping = inode->i_mapping;
675 	struct address_space *buffer_mapping = bh->b_page->mapping;
676 
677 	mark_buffer_dirty(bh);
678 	if (!mapping->assoc_mapping) {
679 		mapping->assoc_mapping = buffer_mapping;
680 	} else {
681 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
682 	}
683 	if (!bh->b_assoc_map) {
684 		spin_lock(&buffer_mapping->private_lock);
685 		list_move_tail(&bh->b_assoc_buffers,
686 				&mapping->private_list);
687 		bh->b_assoc_map = mapping;
688 		spin_unlock(&buffer_mapping->private_lock);
689 	}
690 }
691 EXPORT_SYMBOL(mark_buffer_dirty_inode);
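
/*
 * A minimal sketch of the intended use: when a filesystem dirties blockdev
 * metadata that a later fsync() of a regular file depends on (an ext2-style
 * indirect block, say), it associates that buffer with the file's mapping:
 *
 *	bh = sb_getblk(inode->i_sb, indirect_block_nr);
 *	... update the block pointers in bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 *
 * A subsequent sync_mapping_buffers(inode->i_mapping) then writes and waits
 * on that buffer; indirect_block_nr is just an illustrative variable here.
 */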
692 
693 /*
694  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
695  * dirty.
696  *
697  * If warn is true, then emit a warning if the page is not uptodate and has
698  * not been truncated.
699  */
700 static int __set_page_dirty(struct page *page,
701 		struct address_space *mapping, int warn)
702 {
703 	if (unlikely(!mapping))
704 		return !TestSetPageDirty(page);
705 
706 	if (TestSetPageDirty(page))
707 		return 0;
708 
709 	write_lock_irq(&mapping->tree_lock);
710 	if (page->mapping) {	/* Race with truncate? */
711 		WARN_ON_ONCE(warn && !PageUptodate(page));
712 
713 		if (mapping_cap_account_dirty(mapping)) {
714 			__inc_zone_page_state(page, NR_FILE_DIRTY);
715 			__inc_bdi_stat(mapping->backing_dev_info,
716 					BDI_RECLAIMABLE);
717 			task_io_account_write(PAGE_CACHE_SIZE);
718 		}
719 		radix_tree_tag_set(&mapping->page_tree,
720 				page_index(page), PAGECACHE_TAG_DIRTY);
721 	}
722 	write_unlock_irq(&mapping->tree_lock);
723 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
724 
725 	return 1;
726 }
727 
728 /*
729  * Add a page to the dirty page list.
730  *
731  * It is a sad fact of life that this function is called from several places
732  * deeply under spinlocking.  It may not sleep.
733  *
734  * If the page has buffers, the uptodate buffers are set dirty, to preserve
735  * dirty-state coherency between the page and the buffers.  If the page does
736  * not have buffers then when they are later attached they will all be set
737  * dirty.
738  *
739  * The buffers are dirtied before the page is dirtied.  There's a small race
740  * window in which a writepage caller may see the page cleanness but not the
741  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
742  * before the buffers, a concurrent writepage caller could clear the page dirty
743  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
744  * page on the dirty page list.
745  *
746  * We use private_lock to lock against try_to_free_buffers while using the
747  * page's buffer list.  Also use this to protect against clean buffers being
748  * added to the page after it was set dirty.
749  *
750  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
751  * address_space though.
752  */
753 int __set_page_dirty_buffers(struct page *page)
754 {
755 	struct address_space *mapping = page_mapping(page);
756 
757 	if (unlikely(!mapping))
758 		return !TestSetPageDirty(page);
759 
760 	spin_lock(&mapping->private_lock);
761 	if (page_has_buffers(page)) {
762 		struct buffer_head *head = page_buffers(page);
763 		struct buffer_head *bh = head;
764 
765 		do {
766 			set_buffer_dirty(bh);
767 			bh = bh->b_this_page;
768 		} while (bh != head);
769 	}
770 	spin_unlock(&mapping->private_lock);
771 
772 	return __set_page_dirty(page, mapping, 1);
773 }
774 EXPORT_SYMBOL(__set_page_dirty_buffers);
775 
776 /*
777  * Write out and wait upon a list of buffers.
778  *
779  * We have conflicting pressures: we want to make sure that all
780  * initially dirty buffers get waited on, but that any subsequently
781  * dirtied buffers don't.  After all, we don't want fsync to last
782  * forever if somebody is actively writing to the file.
783  *
784  * Do this in two main stages: first we copy dirty buffers to a
785  * temporary inode list, queueing the writes as we go.  Then we clean
786  * up, waiting for those writes to complete.
787  *
788  * During this second stage, any subsequent updates to the file may end
789  * up refiling the buffer on the original inode's dirty list again, so
790  * there is a chance we will end up with a buffer queued for write but
791  * not yet completed on that list.  So, as a final cleanup we go through
792  * the osync code to catch these locked, dirty buffers without requeuing
793  * any newly dirty buffers for write.
794  */
795 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
796 {
797 	struct buffer_head *bh;
798 	struct list_head tmp;
799 	struct address_space *mapping;
800 	int err = 0, err2;
801 
802 	INIT_LIST_HEAD(&tmp);
803 
804 	spin_lock(lock);
805 	while (!list_empty(list)) {
806 		bh = BH_ENTRY(list->next);
807 		mapping = bh->b_assoc_map;
808 		__remove_assoc_queue(bh);
809 		/* Avoid race with mark_buffer_dirty_inode() which does
810 		 * a lockless check and we rely on seeing the dirty bit */
811 		smp_mb();
812 		if (buffer_dirty(bh) || buffer_locked(bh)) {
813 			list_add(&bh->b_assoc_buffers, &tmp);
814 			bh->b_assoc_map = mapping;
815 			if (buffer_dirty(bh)) {
816 				get_bh(bh);
817 				spin_unlock(lock);
818 				/*
819 				 * Ensure any pending I/O completes so that
820 				 * ll_rw_block() actually writes the current
821 				 * contents - it is a noop if I/O is still in
822 				 * flight on potentially older contents.
823 				 */
824 				ll_rw_block(SWRITE_SYNC, 1, &bh);
825 				brelse(bh);
826 				spin_lock(lock);
827 			}
828 		}
829 	}
830 
831 	while (!list_empty(&tmp)) {
832 		bh = BH_ENTRY(tmp.prev);
833 		get_bh(bh);
834 		mapping = bh->b_assoc_map;
835 		__remove_assoc_queue(bh);
836 		/* Avoid race with mark_buffer_dirty_inode() which does
837 		 * a lockless check and we rely on seeing the dirty bit */
838 		smp_mb();
839 		if (buffer_dirty(bh)) {
840 			list_add(&bh->b_assoc_buffers,
841 				 &mapping->private_list);
842 			bh->b_assoc_map = mapping;
843 		}
844 		spin_unlock(lock);
845 		wait_on_buffer(bh);
846 		if (!buffer_uptodate(bh))
847 			err = -EIO;
848 		brelse(bh);
849 		spin_lock(lock);
850 	}
851 
852 	spin_unlock(lock);
853 	err2 = osync_buffers_list(lock, list);
854 	if (err)
855 		return err;
856 	else
857 		return err2;
858 }
859 
860 /*
861  * Invalidate any and all dirty buffers on a given inode.  We are
862  * probably unmounting the fs, but that doesn't mean we have already
863  * done a sync().  Just drop the buffers from the inode list.
864  *
865  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
866  * assumes that all the buffers are against the blockdev.  Not true
867  * for reiserfs.
868  */
869 void invalidate_inode_buffers(struct inode *inode)
870 {
871 	if (inode_has_buffers(inode)) {
872 		struct address_space *mapping = &inode->i_data;
873 		struct list_head *list = &mapping->private_list;
874 		struct address_space *buffer_mapping = mapping->assoc_mapping;
875 
876 		spin_lock(&buffer_mapping->private_lock);
877 		while (!list_empty(list))
878 			__remove_assoc_queue(BH_ENTRY(list->next));
879 		spin_unlock(&buffer_mapping->private_lock);
880 	}
881 }
882 
883 /*
884  * Remove any clean buffers from the inode's buffer list.  This is called
885  * when we're trying to free the inode itself.  Those buffers can pin it.
886  *
887  * Returns true if all buffers were removed.
888  */
889 int remove_inode_buffers(struct inode *inode)
890 {
891 	int ret = 1;
892 
893 	if (inode_has_buffers(inode)) {
894 		struct address_space *mapping = &inode->i_data;
895 		struct list_head *list = &mapping->private_list;
896 		struct address_space *buffer_mapping = mapping->assoc_mapping;
897 
898 		spin_lock(&buffer_mapping->private_lock);
899 		while (!list_empty(list)) {
900 			struct buffer_head *bh = BH_ENTRY(list->next);
901 			if (buffer_dirty(bh)) {
902 				ret = 0;
903 				break;
904 			}
905 			__remove_assoc_queue(bh);
906 		}
907 		spin_unlock(&buffer_mapping->private_lock);
908 	}
909 	return ret;
910 }
911 
912 /*
913  * Create the appropriate buffers when given a page for data area and
914  * the size of each buffer.. Use the bh->b_this_page linked list to
915  * follow the buffers created.  Return NULL if unable to create more
916  * buffers.
917  *
918  * The retry flag is used to differentiate async IO (paging, swapping)
919  * which may not fail from ordinary buffer allocations.
920  */
921 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
922 		int retry)
923 {
924 	struct buffer_head *bh, *head;
925 	long offset;
926 
927 try_again:
928 	head = NULL;
929 	offset = PAGE_SIZE;
930 	while ((offset -= size) >= 0) {
931 		bh = alloc_buffer_head(GFP_NOFS);
932 		if (!bh)
933 			goto no_grow;
934 
935 		bh->b_bdev = NULL;
936 		bh->b_this_page = head;
937 		bh->b_blocknr = -1;
938 		head = bh;
939 
940 		bh->b_state = 0;
941 		atomic_set(&bh->b_count, 0);
942 		bh->b_private = NULL;
943 		bh->b_size = size;
944 
945 		/* Link the buffer to its page */
946 		set_bh_page(bh, page, offset);
947 
948 		init_buffer(bh, NULL, NULL);
949 	}
950 	return head;
951 /*
952  * In case anything failed, we just free everything we got.
953  */
954 no_grow:
955 	if (head) {
956 		do {
957 			bh = head;
958 			head = head->b_this_page;
959 			free_buffer_head(bh);
960 		} while (head);
961 	}
962 
963 	/*
964 	 * Return failure for non-async IO requests.  Async IO requests
965 	 * are not allowed to fail, so we have to wait until buffer heads
966 	 * become available.  But we don't want tasks sleeping with
967 	 * partially complete buffers, so all were released above.
968 	 */
969 	if (!retry)
970 		return NULL;
971 
972 	/* We're _really_ low on memory. Now we just
973 	 * wait for old buffer heads to become free due to
974 	 * finishing IO.  Since this is an async request and
975 	 * the reserve list is empty, we're sure there are
976 	 * async buffer heads in use.
977 	 */
978 	free_more_memory();
979 	goto try_again;
980 }
981 EXPORT_SYMBOL_GPL(alloc_page_buffers);
982 
983 static inline void
984 link_dev_buffers(struct page *page, struct buffer_head *head)
985 {
986 	struct buffer_head *bh, *tail;
987 
988 	bh = head;
989 	do {
990 		tail = bh;
991 		bh = bh->b_this_page;
992 	} while (bh);
993 	tail->b_this_page = head;
994 	attach_page_buffers(page, head);
995 }
996 
997 /*
998  * Initialise the state of a blockdev page's buffers.
999  */
1000 static void
1001 init_page_buffers(struct page *page, struct block_device *bdev,
1002 			sector_t block, int size)
1003 {
1004 	struct buffer_head *head = page_buffers(page);
1005 	struct buffer_head *bh = head;
1006 	int uptodate = PageUptodate(page);
1007 
1008 	do {
1009 		if (!buffer_mapped(bh)) {
1010 			init_buffer(bh, NULL, NULL);
1011 			bh->b_bdev = bdev;
1012 			bh->b_blocknr = block;
1013 			if (uptodate)
1014 				set_buffer_uptodate(bh);
1015 			set_buffer_mapped(bh);
1016 		}
1017 		block++;
1018 		bh = bh->b_this_page;
1019 	} while (bh != head);
1020 }
1021 
1022 /*
1023  * Create the page-cache page that contains the requested block.
1024  *
1025  * This is used purely for blockdev mappings.
1026  */
1027 static struct page *
1028 grow_dev_page(struct block_device *bdev, sector_t block,
1029 		pgoff_t index, int size)
1030 {
1031 	struct inode *inode = bdev->bd_inode;
1032 	struct page *page;
1033 	struct buffer_head *bh;
1034 
1035 	page = find_or_create_page(inode->i_mapping, index,
1036 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1037 	if (!page)
1038 		return NULL;
1039 
1040 	BUG_ON(!PageLocked(page));
1041 
1042 	if (page_has_buffers(page)) {
1043 		bh = page_buffers(page);
1044 		if (bh->b_size == size) {
1045 			init_page_buffers(page, bdev, block, size);
1046 			return page;
1047 		}
1048 		if (!try_to_free_buffers(page))
1049 			goto failed;
1050 	}
1051 
1052 	/*
1053 	 * Allocate some buffers for this page
1054 	 */
1055 	bh = alloc_page_buffers(page, size, 0);
1056 	if (!bh)
1057 		goto failed;
1058 
1059 	/*
1060 	 * Link the page to the buffers and initialise them.  Take the
1061 	 * lock to be atomic wrt __find_get_block(), which does not
1062 	 * run under the page lock.
1063 	 */
1064 	spin_lock(&inode->i_mapping->private_lock);
1065 	link_dev_buffers(page, bh);
1066 	init_page_buffers(page, bdev, block, size);
1067 	spin_unlock(&inode->i_mapping->private_lock);
1068 	return page;
1069 
1070 failed:
1071 	BUG();
1072 	unlock_page(page);
1073 	page_cache_release(page);
1074 	return NULL;
1075 }
1076 
1077 /*
1078  * Create buffers for the specified block device block's page.  If
1079  * that page was dirty, the buffers are set dirty also.
1080  */
1081 static int
1082 grow_buffers(struct block_device *bdev, sector_t block, int size)
1083 {
1084 	struct page *page;
1085 	pgoff_t index;
1086 	int sizebits;
1087 
1088 	sizebits = -1;
1089 	do {
1090 		sizebits++;
1091 	} while ((size << sizebits) < PAGE_SIZE);
1092 
1093 	index = block >> sizebits;
1094 
1095 	/*
1096 	 * Check for a block which wants to lie outside our maximum possible
1097 	 * pagecache index.  (this comparison is done using sector_t types).
1098 	 */
1099 	if (unlikely(index != block >> sizebits)) {
1100 		char b[BDEVNAME_SIZE];
1101 
1102 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1103 			"device %s\n",
1104 			__func__, (unsigned long long)block,
1105 			bdevname(bdev, b));
1106 		return -EIO;
1107 	}
1108 	block = index << sizebits;
1109 	/* Create a page with the proper size buffers.. */
1110 	page = grow_dev_page(bdev, block, index, size);
1111 	if (!page)
1112 		return 0;
1113 	unlock_page(page);
1114 	page_cache_release(page);
1115 	return 1;
1116 }
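
/*
 * Worked example of the index arithmetic above, assuming PAGE_SIZE is 4096:
 * for size = 1024 the loop leaves sizebits = 2 (four blocks per page), so
 * block 103 lives at page index 103 >> 2 = 25, and index << sizebits = 100
 * is the first block of that page as passed to grow_dev_page().
 */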
1117 
1118 static struct buffer_head *
1119 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1120 {
1121 	/* Size must be multiple of hard sectorsize */
1122 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1123 			(size < 512 || size > PAGE_SIZE))) {
1124 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1125 					size);
1126 		printk(KERN_ERR "hardsect size: %d\n",
1127 					bdev_hardsect_size(bdev));
1128 
1129 		dump_stack();
1130 		return NULL;
1131 	}
1132 
1133 	for (;;) {
1134 		struct buffer_head * bh;
1135 		int ret;
1136 
1137 		bh = __find_get_block(bdev, block, size);
1138 		if (bh)
1139 			return bh;
1140 
1141 		ret = grow_buffers(bdev, block, size);
1142 		if (ret < 0)
1143 			return NULL;
1144 		if (ret == 0)
1145 			free_more_memory();
1146 	}
1147 }
1148 
1149 /*
1150  * The relationship between dirty buffers and dirty pages:
1151  *
1152  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1153  * the page is tagged dirty in its radix tree.
1154  *
1155  * At all times, the dirtiness of the buffers represents the dirtiness of
1156  * subsections of the page.  If the page has buffers, the page dirty bit is
1157  * merely a hint about the true dirty state.
1158  *
1159  * When a page is set dirty in its entirety, all its buffers are marked dirty
1160  * (if the page has buffers).
1161  *
1162  * When a buffer is marked dirty, its page is dirtied, but the page's other
1163  * buffers are not.
1164  *
1165  * Also.  When blockdev buffers are explicitly read with bread(), they
1166  * individually become uptodate.  But their backing page remains not
1167  * uptodate - even if all of its buffers are uptodate.  A subsequent
1168  * block_read_full_page() against that page will discover all the uptodate
1169  * buffers, will set the page uptodate and will perform no I/O.
1170  */
1171 
1172 /**
1173  * mark_buffer_dirty - mark a buffer_head as needing writeout
1174  * @bh: the buffer_head to mark dirty
1175  *
1176  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1177  * backing page dirty, then tag the page as dirty in its address_space's radix
1178  * tree and then attach the address_space's inode to its superblock's dirty
1179  * inode list.
1180  *
1181  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1182  * mapping->tree_lock and the global inode_lock.
1183  */
1184 void mark_buffer_dirty(struct buffer_head *bh)
1185 {
1186 	WARN_ON_ONCE(!buffer_uptodate(bh));
1187 
1188 	/*
1189 	 * Very *carefully* optimize the it-is-already-dirty case.
1190 	 *
1191 	 * Don't let the final "is it dirty" escape to before we
1192 	 * perhaps modified the buffer.
1193 	 */
1194 	if (buffer_dirty(bh)) {
1195 		smp_mb();
1196 		if (buffer_dirty(bh))
1197 			return;
1198 	}
1199 
1200 	if (!test_set_buffer_dirty(bh))
1201 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1202 }
1203 
1204 /*
1205  * Decrement a buffer_head's reference count.  If all buffers against a page
1206  * have zero reference count, are clean and unlocked, and if the page is clean
1207  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1208  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1209  * a page but it ends up not being freed, and buffers may later be reattached).
1210  */
1211 void __brelse(struct buffer_head * buf)
1212 {
1213 	if (atomic_read(&buf->b_count)) {
1214 		put_bh(buf);
1215 		return;
1216 	}
1217 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1218 	WARN_ON(1);
1219 }
1220 
1221 /*
1222  * bforget() is like brelse(), except it discards any
1223  * potentially dirty data.
1224  */
1225 void __bforget(struct buffer_head *bh)
1226 {
1227 	clear_buffer_dirty(bh);
1228 	if (bh->b_assoc_map) {
1229 		struct address_space *buffer_mapping = bh->b_page->mapping;
1230 
1231 		spin_lock(&buffer_mapping->private_lock);
1232 		list_del_init(&bh->b_assoc_buffers);
1233 		bh->b_assoc_map = NULL;
1234 		spin_unlock(&buffer_mapping->private_lock);
1235 	}
1236 	__brelse(bh);
1237 }
1238 
1239 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1240 {
1241 	lock_buffer(bh);
1242 	if (buffer_uptodate(bh)) {
1243 		unlock_buffer(bh);
1244 		return bh;
1245 	} else {
1246 		get_bh(bh);
1247 		bh->b_end_io = end_buffer_read_sync;
1248 		submit_bh(READ, bh);
1249 		wait_on_buffer(bh);
1250 		if (buffer_uptodate(bh))
1251 			return bh;
1252 	}
1253 	brelse(bh);
1254 	return NULL;
1255 }
1256 
1257 /*
1258  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1259  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1260  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1261  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1262  * CPU's LRUs at the same time.
1263  *
1264  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1265  * sb_find_get_block().
1266  *
1267  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1268  * a local interrupt disable for that.
1269  */
1270 
1271 #define BH_LRU_SIZE	8
1272 
1273 struct bh_lru {
1274 	struct buffer_head *bhs[BH_LRU_SIZE];
1275 };
1276 
1277 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1278 
1279 #ifdef CONFIG_SMP
1280 #define bh_lru_lock()	local_irq_disable()
1281 #define bh_lru_unlock()	local_irq_enable()
1282 #else
1283 #define bh_lru_lock()	preempt_disable()
1284 #define bh_lru_unlock()	preempt_enable()
1285 #endif
1286 
1287 static inline void check_irqs_on(void)
1288 {
1289 #ifdef irqs_disabled
1290 	BUG_ON(irqs_disabled());
1291 #endif
1292 }
1293 
1294 /*
1295  * The LRU management algorithm is dopey-but-simple.  Sorry.
1296  */
1297 static void bh_lru_install(struct buffer_head *bh)
1298 {
1299 	struct buffer_head *evictee = NULL;
1300 	struct bh_lru *lru;
1301 
1302 	check_irqs_on();
1303 	bh_lru_lock();
1304 	lru = &__get_cpu_var(bh_lrus);
1305 	if (lru->bhs[0] != bh) {
1306 		struct buffer_head *bhs[BH_LRU_SIZE];
1307 		int in;
1308 		int out = 0;
1309 
1310 		get_bh(bh);
1311 		bhs[out++] = bh;
1312 		for (in = 0; in < BH_LRU_SIZE; in++) {
1313 			struct buffer_head *bh2 = lru->bhs[in];
1314 
1315 			if (bh2 == bh) {
1316 				__brelse(bh2);
1317 			} else {
1318 				if (out >= BH_LRU_SIZE) {
1319 					BUG_ON(evictee != NULL);
1320 					evictee = bh2;
1321 				} else {
1322 					bhs[out++] = bh2;
1323 				}
1324 			}
1325 		}
1326 		while (out < BH_LRU_SIZE)
1327 			bhs[out++] = NULL;
1328 		memcpy(lru->bhs, bhs, sizeof(bhs));
1329 	}
1330 	bh_lru_unlock();
1331 
1332 	if (evictee)
1333 		__brelse(evictee);
1334 }
1335 
1336 /*
1337  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1338  */
1339 static struct buffer_head *
1340 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1341 {
1342 	struct buffer_head *ret = NULL;
1343 	struct bh_lru *lru;
1344 	unsigned int i;
1345 
1346 	check_irqs_on();
1347 	bh_lru_lock();
1348 	lru = &__get_cpu_var(bh_lrus);
1349 	for (i = 0; i < BH_LRU_SIZE; i++) {
1350 		struct buffer_head *bh = lru->bhs[i];
1351 
1352 		if (bh && bh->b_bdev == bdev &&
1353 				bh->b_blocknr == block && bh->b_size == size) {
1354 			if (i) {
1355 				while (i) {
1356 					lru->bhs[i] = lru->bhs[i - 1];
1357 					i--;
1358 				}
1359 				lru->bhs[0] = bh;
1360 			}
1361 			get_bh(bh);
1362 			ret = bh;
1363 			break;
1364 		}
1365 	}
1366 	bh_lru_unlock();
1367 	return ret;
1368 }
1369 
1370 /*
1371  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1372  * it in the LRU and mark it as accessed.  If it is not present then return
1373  * NULL
1374  */
1375 struct buffer_head *
1376 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1377 {
1378 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1379 
1380 	if (bh == NULL) {
1381 		bh = __find_get_block_slow(bdev, block);
1382 		if (bh)
1383 			bh_lru_install(bh);
1384 	}
1385 	if (bh)
1386 		touch_buffer(bh);
1387 	return bh;
1388 }
1389 EXPORT_SYMBOL(__find_get_block);
1390 
1391 /*
1392  * __getblk will locate (and, if necessary, create) the buffer_head
1393  * which corresponds to the passed block_device, block and size. The
1394  * returned buffer has its reference count incremented.
1395  *
1396  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1397  * illegal block number, __getblk() will happily return a buffer_head
1398  * which represents the non-existent block.  Very weird.
1399  *
1400  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1401  * attempt is failing.  FIXME, perhaps?
1402  */
1403 struct buffer_head *
1404 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1405 {
1406 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1407 
1408 	might_sleep();
1409 	if (bh == NULL)
1410 		bh = __getblk_slow(bdev, block, size);
1411 	return bh;
1412 }
1413 EXPORT_SYMBOL(__getblk);
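
/*
 * A minimal usage sketch (typically via the sb_getblk() wrapper): when a
 * block is about to be completely overwritten there is no need to read it
 * first, so __getblk() plus an explicit overwrite is enough:
 *
 *	bh = __getblk(bdev, block, blocksize);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */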
1414 
1415 /*
1416  * Do async read-ahead on a buffer..
1417  */
1418 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1419 {
1420 	struct buffer_head *bh = __getblk(bdev, block, size);
1421 	if (likely(bh)) {
1422 		ll_rw_block(READA, 1, &bh);
1423 		brelse(bh);
1424 	}
1425 }
1426 EXPORT_SYMBOL(__breadahead);
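
/*
 * A minimal usage sketch: start readahead for a block that will probably be
 * needed soon, then read the current one for real:
 *
 *	__breadahead(bdev, block + 1, blocksize);
 *	bh = __bread(bdev, block, blocksize);
 */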
1427 
1428 /**
1429  *  __bread() - reads a specified block and returns the bh
1430  *  @bdev: the block_device to read from
1431  *  @block: number of block
1432  *  @size: size (in bytes) to read
1433  *
1434  *  Reads a specified block, and returns buffer head that contains it.
1435  *  It returns NULL if the block was unreadable.
1436  */
1437 struct buffer_head *
1438 __bread(struct block_device *bdev, sector_t block, unsigned size)
1439 {
1440 	struct buffer_head *bh = __getblk(bdev, block, size);
1441 
1442 	if (likely(bh) && !buffer_uptodate(bh))
1443 		bh = __bread_slow(bh);
1444 	return bh;
1445 }
1446 EXPORT_SYMBOL(__bread);
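
/*
 * A minimal usage sketch (typically via the sb_bread() wrapper): read a
 * metadata block synchronously and release it when done:
 *
 *	bh = __bread(bdev, block, blocksize);
 *	if (!bh)
 *		return -EIO;
 *	... examine the data at bh->b_data ...
 *	brelse(bh);
 */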
1447 
1448 /*
1449  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1450  * This doesn't race because it runs in each cpu either in irq
1451  * or with preempt disabled.
1452  */
1453 static void invalidate_bh_lru(void *arg)
1454 {
1455 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1456 	int i;
1457 
1458 	for (i = 0; i < BH_LRU_SIZE; i++) {
1459 		brelse(b->bhs[i]);
1460 		b->bhs[i] = NULL;
1461 	}
1462 	put_cpu_var(bh_lrus);
1463 }
1464 
1465 void invalidate_bh_lrus(void)
1466 {
1467 	on_each_cpu(invalidate_bh_lru, NULL, 1);
1468 }
1469 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1470 
1471 void set_bh_page(struct buffer_head *bh,
1472 		struct page *page, unsigned long offset)
1473 {
1474 	bh->b_page = page;
1475 	BUG_ON(offset >= PAGE_SIZE);
1476 	if (PageHighMem(page))
1477 		/*
1478 		 * This catches illegal uses and preserves the offset:
1479 		 */
1480 		bh->b_data = (char *)(0 + offset);
1481 	else
1482 		bh->b_data = page_address(page) + offset;
1483 }
1484 EXPORT_SYMBOL(set_bh_page);
1485 
1486 /*
1487  * Called when truncating a buffer on a page completely.
1488  */
1489 static void discard_buffer(struct buffer_head * bh)
1490 {
1491 	lock_buffer(bh);
1492 	clear_buffer_dirty(bh);
1493 	bh->b_bdev = NULL;
1494 	clear_buffer_mapped(bh);
1495 	clear_buffer_req(bh);
1496 	clear_buffer_new(bh);
1497 	clear_buffer_delay(bh);
1498 	clear_buffer_unwritten(bh);
1499 	unlock_buffer(bh);
1500 }
1501 
1502 /**
1503  * block_invalidatepage - invalidate part or all of a buffer-backed page
1504  *
1505  * @page: the page which is affected
1506  * @offset: the index of the truncation point
1507  *
1508  * block_invalidatepage() is called when all or part of the page has become
1509  * invalidatedby a truncate operation.
1510  *
1511  * block_invalidatepage() does not have to release all buffers, but it must
1512  * ensure that no dirty buffer is left outside @offset and that no I/O
1513  * is underway against any of the blocks which are outside the truncation
1514  * point.  Because the caller is about to free (and possibly reuse) those
1515  * blocks on-disk.
1516  */
1517 void block_invalidatepage(struct page *page, unsigned long offset)
1518 {
1519 	struct buffer_head *head, *bh, *next;
1520 	unsigned int curr_off = 0;
1521 
1522 	BUG_ON(!PageLocked(page));
1523 	if (!page_has_buffers(page))
1524 		goto out;
1525 
1526 	head = page_buffers(page);
1527 	bh = head;
1528 	do {
1529 		unsigned int next_off = curr_off + bh->b_size;
1530 		next = bh->b_this_page;
1531 
1532 		/*
1533 		 * is this block fully invalidated?
1534 		 */
1535 		if (offset <= curr_off)
1536 			discard_buffer(bh);
1537 		curr_off = next_off;
1538 		bh = next;
1539 	} while (bh != head);
1540 
1541 	/*
1542 	 * We release buffers only if the entire page is being invalidated.
1543 	 * The get_block cached value has been unconditionally invalidated,
1544 	 * so real IO is not possible anymore.
1545 	 */
1546 	if (offset == 0)
1547 		try_to_release_page(page, 0);
1548 out:
1549 	return;
1550 }
1551 EXPORT_SYMBOL(block_invalidatepage);
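
/*
 * A minimal sketch of how this is wired up: do_invalidatepage() falls back
 * to block_invalidatepage() when an address_space provides no
 * ->invalidatepage of its own, and a buffer-backed filesystem can also point
 * at it explicitly (example_readpage/example_writepage are placeholders):
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */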
1552 
1553 /*
1554  * We attach and possibly dirty the buffers atomically wrt
1555  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1556  * is already excluded via the page lock.
1557  */
1558 void create_empty_buffers(struct page *page,
1559 			unsigned long blocksize, unsigned long b_state)
1560 {
1561 	struct buffer_head *bh, *head, *tail;
1562 
1563 	head = alloc_page_buffers(page, blocksize, 1);
1564 	bh = head;
1565 	do {
1566 		bh->b_state |= b_state;
1567 		tail = bh;
1568 		bh = bh->b_this_page;
1569 	} while (bh);
1570 	tail->b_this_page = head;
1571 
1572 	spin_lock(&page->mapping->private_lock);
1573 	if (PageUptodate(page) || PageDirty(page)) {
1574 		bh = head;
1575 		do {
1576 			if (PageDirty(page))
1577 				set_buffer_dirty(bh);
1578 			if (PageUptodate(page))
1579 				set_buffer_uptodate(bh);
1580 			bh = bh->b_this_page;
1581 		} while (bh != head);
1582 	}
1583 	attach_page_buffers(page, head);
1584 	spin_unlock(&page->mapping->private_lock);
1585 }
1586 EXPORT_SYMBOL(create_empty_buffers);
1587 
1588 /*
1589  * We are taking a block for data and we don't want any output from any
1590  * buffer-cache aliases starting from return from that function and
1591  * until the moment when something will explicitly mark the buffer
1592  * dirty (hopefully that will not happen until we free that block ;-)
1593  * We don't even need to mark it not-uptodate - nobody can expect
1594  * anything from a newly allocated buffer anyway. We used to use
1595  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1596  * don't want to mark the alias unmapped, for example - it would confuse
1597  * anyone who might pick it with bread() afterwards...
1598  *
1599  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1600  * be writeout I/O going on against recently-freed buffers.  We don't
1601  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1602  * only if we really need to.  That happens here.
1603  */
1604 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1605 {
1606 	struct buffer_head *old_bh;
1607 
1608 	might_sleep();
1609 
1610 	old_bh = __find_get_block_slow(bdev, block);
1611 	if (old_bh) {
1612 		clear_buffer_dirty(old_bh);
1613 		wait_on_buffer(old_bh);
1614 		clear_buffer_req(old_bh);
1615 		__brelse(old_bh);
1616 	}
1617 }
1618 EXPORT_SYMBOL(unmap_underlying_metadata);
1619 
1620 /*
1621  * NOTE! All mapped/uptodate combinations are valid:
1622  *
1623  *	Mapped	Uptodate	Meaning
1624  *
1625  *	No	No		"unknown" - must do get_block()
1626  *	No	Yes		"hole" - zero-filled
1627  *	Yes	No		"allocated" - allocated on disk, not read in
1628  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1629  *
1630  * "Dirty" is valid only with the last case (mapped+uptodate).
1631  */
1632 
1633 /*
1634  * While block_write_full_page is writing back the dirty buffers under
1635  * the page lock, whoever dirtied the buffers may decide to clean them
1636  * again at any time.  We handle that by only looking at the buffer
1637  * state inside lock_buffer().
1638  *
1639  * If block_write_full_page() is called for regular writeback
1640  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1641  * locked buffer.   This can only happen if someone has written the buffer
1642  * directly, with submit_bh().  At the address_space level PageWriteback
1643  * prevents this contention from occurring.
1644  */
1645 static int __block_write_full_page(struct inode *inode, struct page *page,
1646 			get_block_t *get_block, struct writeback_control *wbc)
1647 {
1648 	int err;
1649 	sector_t block;
1650 	sector_t last_block;
1651 	struct buffer_head *bh, *head;
1652 	const unsigned blocksize = 1 << inode->i_blkbits;
1653 	int nr_underway = 0;
1654 
1655 	BUG_ON(!PageLocked(page));
1656 
1657 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1658 
1659 	if (!page_has_buffers(page)) {
1660 		create_empty_buffers(page, blocksize,
1661 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1662 	}
1663 
1664 	/*
1665 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1666 	 * here, and the (potentially unmapped) buffers may become dirty at
1667 	 * any time.  If a buffer becomes dirty here after we've inspected it
1668 	 * then we just miss that fact, and the page stays dirty.
1669 	 *
1670 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1671 	 * handle that here by just cleaning them.
1672 	 */
1673 
1674 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1675 	head = page_buffers(page);
1676 	bh = head;
1677 
1678 	/*
1679 	 * Get all the dirty buffers mapped to disk addresses and
1680 	 * handle any aliases from the underlying blockdev's mapping.
1681 	 */
1682 	do {
1683 		if (block > last_block) {
1684 			/*
1685 			 * mapped buffers outside i_size will occur, because
1686 			 * this page can be outside i_size when there is a
1687 			 * truncate in progress.
1688 			 */
1689 			/*
1690 			 * The buffer was zeroed by block_write_full_page()
1691 			 */
1692 			clear_buffer_dirty(bh);
1693 			set_buffer_uptodate(bh);
1694 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1695 			   buffer_dirty(bh)) {
1696 			WARN_ON(bh->b_size != blocksize);
1697 			err = get_block(inode, block, bh, 1);
1698 			if (err)
1699 				goto recover;
1700 			clear_buffer_delay(bh);
1701 			if (buffer_new(bh)) {
1702 				/* blockdev mappings never come here */
1703 				clear_buffer_new(bh);
1704 				unmap_underlying_metadata(bh->b_bdev,
1705 							bh->b_blocknr);
1706 			}
1707 		}
1708 		bh = bh->b_this_page;
1709 		block++;
1710 	} while (bh != head);
1711 
1712 	do {
1713 		if (!buffer_mapped(bh))
1714 			continue;
1715 		/*
1716 		 * If it's a fully non-blocking write attempt and we cannot
1717 		 * lock the buffer then redirty the page.  Note that this can
1718 		 * potentially cause a busy-wait loop from pdflush and kswapd
1719 		 * activity, but those code paths have their own higher-level
1720 		 * throttling.
1721 		 */
1722 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1723 			lock_buffer(bh);
1724 		} else if (test_set_buffer_locked(bh)) {
1725 			redirty_page_for_writepage(wbc, page);
1726 			continue;
1727 		}
1728 		if (test_clear_buffer_dirty(bh)) {
1729 			mark_buffer_async_write(bh);
1730 		} else {
1731 			unlock_buffer(bh);
1732 		}
1733 	} while ((bh = bh->b_this_page) != head);
1734 
1735 	/*
1736 	 * The page and its buffers are protected by PageWriteback(), so we can
1737 	 * drop the bh refcounts early.
1738 	 */
1739 	BUG_ON(PageWriteback(page));
1740 	set_page_writeback(page);
1741 
1742 	do {
1743 		struct buffer_head *next = bh->b_this_page;
1744 		if (buffer_async_write(bh)) {
1745 			submit_bh(WRITE, bh);
1746 			nr_underway++;
1747 		}
1748 		bh = next;
1749 	} while (bh != head);
1750 	unlock_page(page);
1751 
1752 	err = 0;
1753 done:
1754 	if (nr_underway == 0) {
1755 		/*
1756 		 * The page was marked dirty, but the buffers were
1757 		 * clean.  Someone wrote them back by hand with
1758 		 * ll_rw_block/submit_bh.  A rare case.
1759 		 */
1760 		end_page_writeback(page);
1761 
1762 		/*
1763 		 * The page and buffer_heads can be released at any time from
1764 		 * here on.
1765 		 */
1766 	}
1767 	return err;
1768 
1769 recover:
1770 	/*
1771 	 * ENOSPC, or some other error.  We may already have added some
1772 	 * blocks to the file, so we need to write these out to avoid
1773 	 * exposing stale data.
1774 	 * The page is currently locked and not marked for writeback
1775 	 */
1776 	bh = head;
1777 	/* Recovery: lock and submit the mapped buffers */
1778 	do {
1779 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1780 		    !buffer_delay(bh)) {
1781 			lock_buffer(bh);
1782 			mark_buffer_async_write(bh);
1783 		} else {
1784 			/*
1785 			 * The buffer may have been set dirty during
1786 			 * attachment to a dirty page.
1787 			 */
1788 			clear_buffer_dirty(bh);
1789 		}
1790 	} while ((bh = bh->b_this_page) != head);
1791 	SetPageError(page);
1792 	BUG_ON(PageWriteback(page));
1793 	mapping_set_error(page->mapping, err);
1794 	set_page_writeback(page);
1795 	do {
1796 		struct buffer_head *next = bh->b_this_page;
1797 		if (buffer_async_write(bh)) {
1798 			clear_buffer_dirty(bh);
1799 			submit_bh(WRITE, bh);
1800 			nr_underway++;
1801 		}
1802 		bh = next;
1803 	} while (bh != head);
1804 	unlock_page(page);
1805 	goto done;
1806 }
1807 
1808 /*
1809  * If a page has any new buffers, zero them out here, and mark them uptodate
1810  * and dirty so they'll be written out (in order to prevent uninitialised
1811  * block data from leaking). And clear the new bit.
1812  */
1813 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1814 {
1815 	unsigned int block_start, block_end;
1816 	struct buffer_head *head, *bh;
1817 
1818 	BUG_ON(!PageLocked(page));
1819 	if (!page_has_buffers(page))
1820 		return;
1821 
1822 	bh = head = page_buffers(page);
1823 	block_start = 0;
1824 	do {
1825 		block_end = block_start + bh->b_size;
1826 
1827 		if (buffer_new(bh)) {
1828 			if (block_end > from && block_start < to) {
1829 				if (!PageUptodate(page)) {
1830 					unsigned start, size;
1831 
1832 					start = max(from, block_start);
1833 					size = min(to, block_end) - start;
1834 
1835 					zero_user(page, start, size);
1836 					set_buffer_uptodate(bh);
1837 				}
1838 
1839 				clear_buffer_new(bh);
1840 				mark_buffer_dirty(bh);
1841 			}
1842 		}
1843 
1844 		block_start = block_end;
1845 		bh = bh->b_this_page;
1846 	} while (bh != head);
1847 }
1848 EXPORT_SYMBOL(page_zero_new_buffers);
1849 
1850 static int __block_prepare_write(struct inode *inode, struct page *page,
1851 		unsigned from, unsigned to, get_block_t *get_block)
1852 {
1853 	unsigned block_start, block_end;
1854 	sector_t block;
1855 	int err = 0;
1856 	unsigned blocksize, bbits;
1857 	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
1858 
1859 	BUG_ON(!PageLocked(page));
1860 	BUG_ON(from > PAGE_CACHE_SIZE);
1861 	BUG_ON(to > PAGE_CACHE_SIZE);
1862 	BUG_ON(from > to);
1863 
1864 	blocksize = 1 << inode->i_blkbits;
1865 	if (!page_has_buffers(page))
1866 		create_empty_buffers(page, blocksize, 0);
1867 	head = page_buffers(page);
1868 
1869 	bbits = inode->i_blkbits;
1870 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1871 
1872 	for (bh = head, block_start = 0; bh != head || !block_start;
1873 	    block++, block_start = block_end, bh = bh->b_this_page) {
1874 		block_end = block_start + blocksize;
1875 		if (block_end <= from || block_start >= to) {
1876 			if (PageUptodate(page)) {
1877 				if (!buffer_uptodate(bh))
1878 					set_buffer_uptodate(bh);
1879 			}
1880 			continue;
1881 		}
1882 		if (buffer_new(bh))
1883 			clear_buffer_new(bh);
1884 		if (!buffer_mapped(bh)) {
1885 			WARN_ON(bh->b_size != blocksize);
1886 			err = get_block(inode, block, bh, 1);
1887 			if (err)
1888 				break;
1889 			if (buffer_new(bh)) {
1890 				unmap_underlying_metadata(bh->b_bdev,
1891 							bh->b_blocknr);
1892 				if (PageUptodate(page)) {
1893 					clear_buffer_new(bh);
1894 					set_buffer_uptodate(bh);
1895 					mark_buffer_dirty(bh);
1896 					continue;
1897 				}
1898 				if (block_end > to || block_start < from)
1899 					zero_user_segments(page,
1900 						to, block_end,
1901 						block_start, from);
1902 				continue;
1903 			}
1904 		}
1905 		if (PageUptodate(page)) {
1906 			if (!buffer_uptodate(bh))
1907 				set_buffer_uptodate(bh);
1908 			continue;
1909 		}
1910 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1911 		    !buffer_unwritten(bh) &&
1912 		     (block_start < from || block_end > to)) {
1913 			ll_rw_block(READ, 1, &bh);
1914 			*wait_bh++ = bh;
1915 		}
1916 	}
1917 	/*
1918 	 * If we issued read requests - let them complete.
1919 	 */
1920 	while (wait_bh > wait) {
1921 		wait_on_buffer(*--wait_bh);
1922 		if (!buffer_uptodate(*wait_bh))
1923 			err = -EIO;
1924 	}
1925 	if (unlikely(err))
1926 		page_zero_new_buffers(page, from, to);
1927 	return err;
1928 }
1929 
1930 static int __block_commit_write(struct inode *inode, struct page *page,
1931 		unsigned from, unsigned to)
1932 {
1933 	unsigned block_start, block_end;
1934 	int partial = 0;
1935 	unsigned blocksize;
1936 	struct buffer_head *bh, *head;
1937 
1938 	blocksize = 1 << inode->i_blkbits;
1939 
1940 	for (bh = head = page_buffers(page), block_start = 0;
1941 	    bh != head || !block_start;
1942 	    block_start = block_end, bh = bh->b_this_page) {
1943 		block_end = block_start + blocksize;
1944 		if (block_end <= from || block_start >= to) {
1945 			if (!buffer_uptodate(bh))
1946 				partial = 1;
1947 		} else {
1948 			set_buffer_uptodate(bh);
1949 			mark_buffer_dirty(bh);
1950 		}
1951 		clear_buffer_new(bh);
1952 	}
1953 
1954 	/*
1955 	 * If this is a partial write which happened to make all buffers
1956 	 * uptodate then we can optimize away a bogus readpage() for
1957 	 * the next read(). Here we 'discover' whether the page went
1958 	 * uptodate as a result of this (potentially partial) write.
1959 	 */
1960 	if (!partial)
1961 		SetPageUptodate(page);
1962 	return 0;
1963 }
1964 
1965 /*
1966  * block_write_begin takes care of the basic task of block allocation and
1967  * bringing partial write blocks uptodate first.
1968  *
1969  * If *pagep is not NULL, then block_write_begin uses the locked page
1970  * at *pagep rather than allocating its own. In this case, the page will
1971  * not be unlocked or deallocated on failure.
1972  */
1973 int block_write_begin(struct file *file, struct address_space *mapping,
1974 			loff_t pos, unsigned len, unsigned flags,
1975 			struct page **pagep, void **fsdata,
1976 			get_block_t *get_block)
1977 {
1978 	struct inode *inode = mapping->host;
1979 	int status = 0;
1980 	struct page *page;
1981 	pgoff_t index;
1982 	unsigned start, end;
1983 	int ownpage = 0;
1984 
1985 	index = pos >> PAGE_CACHE_SHIFT;
1986 	start = pos & (PAGE_CACHE_SIZE - 1);
1987 	end = start + len;
1988 
1989 	page = *pagep;
1990 	if (page == NULL) {
1991 		ownpage = 1;
1992 		page = __grab_cache_page(mapping, index);
1993 		if (!page) {
1994 			status = -ENOMEM;
1995 			goto out;
1996 		}
1997 		*pagep = page;
1998 	} else
1999 		BUG_ON(!PageLocked(page));
2000 
2001 	status = __block_prepare_write(inode, page, start, end, get_block);
2002 	if (unlikely(status)) {
2003 		ClearPageUptodate(page);
2004 
2005 		if (ownpage) {
2006 			unlock_page(page);
2007 			page_cache_release(page);
2008 			*pagep = NULL;
2009 
2010 			/*
2011 			 * prepare_write() may have instantiated a few blocks
2012 			 * outside i_size.  Trim these off again. Don't need
2013 			 * i_size_read because we hold i_mutex.
2014 			 */
2015 			if (pos + len > inode->i_size)
2016 				vmtruncate(inode, inode->i_size);
2017 		}
2018 		goto out;
2019 	}
2020 
2021 out:
2022 	return status;
2023 }
2024 EXPORT_SYMBOL(block_write_begin);
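
/*
 * Illustrative sketch, not part of the original source: a filesystem that
 * uses block_write_begin() typically wraps it in its ->write_begin hook.
 * myfs_write_begin and myfs_get_block are hypothetical stand-ins for the
 * filesystem's own callbacks.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					 pagep, fsdata, myfs_get_block);
 *	}
 */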
2025 
2026 int block_write_end(struct file *file, struct address_space *mapping,
2027 			loff_t pos, unsigned len, unsigned copied,
2028 			struct page *page, void *fsdata)
2029 {
2030 	struct inode *inode = mapping->host;
2031 	unsigned start;
2032 
2033 	start = pos & (PAGE_CACHE_SIZE - 1);
2034 
2035 	if (unlikely(copied < len)) {
2036 		/*
2037 		 * The buffers that were written will now be uptodate, so we
2038 		 * don't have to worry about a readpage reading them and
2039 		 * overwriting a partial write. However if we have encountered
2040 		 * a short write and only partially written into a buffer, it
2041 		 * will not be marked uptodate, so a readpage might come in and
2042 		 * destroy our partial write.
2043 		 *
2044 		 * Do the simplest thing, and just treat any short write to a
2045 		 * non-uptodate page as a zero-length write, and force the
2046 		 * caller to redo the whole thing.
2047 		 */
2048 		if (!PageUptodate(page))
2049 			copied = 0;
2050 
2051 		page_zero_new_buffers(page, start+copied, start+len);
2052 	}
2053 	flush_dcache_page(page);
2054 
2055 	/* This could be a short (even 0-length) commit */
2056 	__block_commit_write(inode, page, start, start+copied);
2057 
2058 	return copied;
2059 }
2060 EXPORT_SYMBOL(block_write_end);
2061 
2062 int generic_write_end(struct file *file, struct address_space *mapping,
2063 			loff_t pos, unsigned len, unsigned copied,
2064 			struct page *page, void *fsdata)
2065 {
2066 	struct inode *inode = mapping->host;
2067 	int i_size_changed = 0;
2068 
2069 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2070 
2071 	/*
2072 	 * No need to use i_size_read() here, the i_size
2073 	 * cannot change under us because we hold i_mutex.
2074 	 *
2075 	 * But it's important to update i_size while still holding page lock:
2076 	 * page writeout could otherwise come in and zero beyond i_size.
2077 	 */
2078 	if (pos+copied > inode->i_size) {
2079 		i_size_write(inode, pos+copied);
2080 		i_size_changed = 1;
2081 	}
2082 
2083 	unlock_page(page);
2084 	page_cache_release(page);
2085 
2086 	/*
2087 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2088 	 * makes the holding time of page lock longer. Second, it forces lock
2089 	 * ordering of page lock and transaction start for journaling
2090 	 * filesystems.
2091 	 */
2092 	if (i_size_changed)
2093 		mark_inode_dirty(inode);
2094 
2095 	return copied;
2096 }
2097 EXPORT_SYMBOL(generic_write_end);
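
/*
 * Illustrative sketch, not part of the original source: filesystems whose
 * ->write_begin is based on block_write_begin() can usually point
 * ->write_end straight at generic_write_end() in their (hypothetical)
 * address_space_operations:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.write_begin	= myfs_write_begin,
 *		.write_end	= generic_write_end,
 *	};
 */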
2098 
2099 /*
2100  * Generic "read page" function for block devices that have the normal
2101  * get_block functionality. This is most of the block device filesystems.
2102  * Reads the page asynchronously --- the unlock_buffer() and
2103  * set/clear_buffer_uptodate() functions propagate buffer state into the
2104  * page struct once IO has completed.
2105  */
2106 int block_read_full_page(struct page *page, get_block_t *get_block)
2107 {
2108 	struct inode *inode = page->mapping->host;
2109 	sector_t iblock, lblock;
2110 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2111 	unsigned int blocksize;
2112 	int nr, i;
2113 	int fully_mapped = 1;
2114 
2115 	BUG_ON(!PageLocked(page));
2116 	blocksize = 1 << inode->i_blkbits;
2117 	if (!page_has_buffers(page))
2118 		create_empty_buffers(page, blocksize, 0);
2119 	head = page_buffers(page);
2120 
2121 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2122 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2123 	bh = head;
2124 	nr = 0;
2125 	i = 0;
2126 
2127 	do {
2128 		if (buffer_uptodate(bh))
2129 			continue;
2130 
2131 		if (!buffer_mapped(bh)) {
2132 			int err = 0;
2133 
2134 			fully_mapped = 0;
2135 			if (iblock < lblock) {
2136 				WARN_ON(bh->b_size != blocksize);
2137 				err = get_block(inode, iblock, bh, 0);
2138 				if (err)
2139 					SetPageError(page);
2140 			}
2141 			if (!buffer_mapped(bh)) {
2142 				zero_user(page, i * blocksize, blocksize);
2143 				if (!err)
2144 					set_buffer_uptodate(bh);
2145 				continue;
2146 			}
2147 			/*
2148 			 * get_block() might have updated the buffer
2149 			 * synchronously
2150 			 */
2151 			if (buffer_uptodate(bh))
2152 				continue;
2153 		}
2154 		arr[nr++] = bh;
2155 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2156 
2157 	if (fully_mapped)
2158 		SetPageMappedToDisk(page);
2159 
2160 	if (!nr) {
2161 		/*
2162 		 * All buffers are uptodate - we can set the page uptodate
2163 		 * as well. But not if get_block() returned an error.
2164 		 */
2165 		if (!PageError(page))
2166 			SetPageUptodate(page);
2167 		unlock_page(page);
2168 		return 0;
2169 	}
2170 
2171 	/* Stage two: lock the buffers */
2172 	for (i = 0; i < nr; i++) {
2173 		bh = arr[i];
2174 		lock_buffer(bh);
2175 		mark_buffer_async_read(bh);
2176 	}
2177 
2178 	/*
2179 	 * Stage 3: start the IO.  Check for uptodateness
2180 	 * inside the buffer lock in case another process reading
2181 	 * the underlying blockdev brought it uptodate (the sct fix).
2182 	 */
2183 	for (i = 0; i < nr; i++) {
2184 		bh = arr[i];
2185 		if (buffer_uptodate(bh))
2186 			end_buffer_async_read(bh, 1);
2187 		else
2188 			submit_bh(READ, bh);
2189 	}
2190 	return 0;
2191 }
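
/*
 * Illustrative sketch, not part of the original source: a typical
 * ->readpage implementation is a one-line wrapper around
 * block_read_full_page(); myfs_get_block is a hypothetical get_block_t
 * callback.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */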
2192 
2193 /* utility function for filesystems that need to do work on expanding
2194  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2195  * deal with the hole.
2196  */
2197 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2198 {
2199 	struct address_space *mapping = inode->i_mapping;
2200 	struct page *page;
2201 	void *fsdata;
2202 	unsigned long limit;
2203 	int err;
2204 
2205 	err = -EFBIG;
2206 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2207 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2208 		send_sig(SIGXFSZ, current, 0);
2209 		goto out;
2210 	}
2211 	if (size > inode->i_sb->s_maxbytes)
2212 		goto out;
2213 
2214 	err = pagecache_write_begin(NULL, mapping, size, 0,
2215 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2216 				&page, &fsdata);
2217 	if (err)
2218 		goto out;
2219 
2220 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2221 	BUG_ON(err > 0);
2222 
2223 out:
2224 	return err;
2225 }
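
/*
 * Illustrative sketch, not part of the original source: a filesystem that
 * cannot represent holes can grow a file from its ->setattr path by
 * zero-filling up to the new size first (simplified, locking and the rest
 * of the size change omitted):
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */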
2226 
2227 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2228 			    loff_t pos, loff_t *bytes)
2229 {
2230 	struct inode *inode = mapping->host;
2231 	unsigned blocksize = 1 << inode->i_blkbits;
2232 	struct page *page;
2233 	void *fsdata;
2234 	pgoff_t index, curidx;
2235 	loff_t curpos;
2236 	unsigned zerofrom, offset, len;
2237 	int err = 0;
2238 
2239 	index = pos >> PAGE_CACHE_SHIFT;
2240 	offset = pos & ~PAGE_CACHE_MASK;
2241 
2242 	while (index > (curidx = (curpos = *bytes) >> PAGE_CACHE_SHIFT)) {
2243 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2244 		if (zerofrom & (blocksize-1)) {
2245 			*bytes |= (blocksize-1);
2246 			(*bytes)++;
2247 		}
2248 		len = PAGE_CACHE_SIZE - zerofrom;
2249 
2250 		err = pagecache_write_begin(file, mapping, curpos, len,
2251 						AOP_FLAG_UNINTERRUPTIBLE,
2252 						&page, &fsdata);
2253 		if (err)
2254 			goto out;
2255 		zero_user(page, zerofrom, len);
2256 		err = pagecache_write_end(file, mapping, curpos, len, len,
2257 						page, fsdata);
2258 		if (err < 0)
2259 			goto out;
2260 		BUG_ON(err != len);
2261 		err = 0;
2262 
2263 		balance_dirty_pages_ratelimited(mapping);
2264 	}
2265 
2266 	/* page covers the boundary, find the boundary offset */
2267 	if (index == curidx) {
2268 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2269 		/* if we will expand the file, the last block will be filled */
2270 		if (offset <= zerofrom) {
2271 			goto out;
2272 		}
2273 		if (zerofrom & (blocksize-1)) {
2274 			*bytes |= (blocksize-1);
2275 			(*bytes)++;
2276 		}
2277 		len = offset - zerofrom;
2278 
2279 		err = pagecache_write_begin(file, mapping, curpos, len,
2280 						AOP_FLAG_UNINTERRUPTIBLE,
2281 						&page, &fsdata);
2282 		if (err)
2283 			goto out;
2284 		zero_user(page, zerofrom, len);
2285 		err = pagecache_write_end(file, mapping, curpos, len, len,
2286 						page, fsdata);
2287 		if (err < 0)
2288 			goto out;
2289 		BUG_ON(err != len);
2290 		err = 0;
2291 	}
2292 out:
2293 	return err;
2294 }
2295 
2296 /*
2297  * For moronic filesystems that do not allow holes in files.
2298  * We may have to extend the file.
2299  */
2300 int cont_write_begin(struct file *file, struct address_space *mapping,
2301 			loff_t pos, unsigned len, unsigned flags,
2302 			struct page **pagep, void **fsdata,
2303 			get_block_t *get_block, loff_t *bytes)
2304 {
2305 	struct inode *inode = mapping->host;
2306 	unsigned blocksize = 1 << inode->i_blkbits;
2307 	unsigned zerofrom;
2308 	int err;
2309 
2310 	err = cont_expand_zero(file, mapping, pos, bytes);
2311 	if (err)
2312 		goto out;
2313 
2314 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2315 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2316 		*bytes |= (blocksize-1);
2317 		(*bytes)++;
2318 	}
2319 
2320 	*pagep = NULL;
2321 	err = block_write_begin(file, mapping, pos, len,
2322 				flags, pagep, fsdata, get_block);
2323 out:
2324 	return err;
2325 }
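
/*
 * Illustrative sketch, not part of the original source: a no-holes
 * filesystem passes a pointer to its "allocated so far" watermark so that
 * cont_write_begin() can zero-fill the gap below the write position.
 * MYFS_I() and myfs_get_block are hypothetical; FAT follows the same
 * pattern with its mmu_private field.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block,
 *					&MYFS_I(mapping->host)->mmu_private);
 *	}
 */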
2326 
2327 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2328 			get_block_t *get_block)
2329 {
2330 	struct inode *inode = page->mapping->host;
2331 	int err = __block_prepare_write(inode, page, from, to, get_block);
2332 	if (err)
2333 		ClearPageUptodate(page);
2334 	return err;
2335 }
2336 
2337 int block_commit_write(struct page *page, unsigned from, unsigned to)
2338 {
2339 	struct inode *inode = page->mapping->host;
2340 	__block_commit_write(inode, page, from, to);
2341 	return 0;
2342 }
2343 
2344 /*
2345  * block_page_mkwrite() is not allowed to change the file size as it gets
2346  * called from a page fault handler when a page is first dirtied. Hence we must
2347  * be careful to check for EOF conditions here. We set the page up correctly
2348  * for a written page which means we get ENOSPC checking when writing into
2349  * holes and correct delalloc and unwritten extent mapping on filesystems that
2350  * support these features.
2351  *
2352  * We are not allowed to take the i_mutex here so we have to play games to
2353  * protect against truncate races as the page could now be beyond EOF.  Because
2354  * vmtruncate() writes the inode size before removing pages, once we have the
2355  * page lock we can determine safely if the page is beyond EOF. If it is not
2356  * beyond EOF, then the page is guaranteed safe against truncation until we
2357  * unlock the page.
2358  */
2359 int
2360 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2361 		   get_block_t get_block)
2362 {
2363 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2364 	unsigned long end;
2365 	loff_t size;
2366 	int ret = -EINVAL;
2367 
2368 	lock_page(page);
2369 	size = i_size_read(inode);
2370 	if ((page->mapping != inode->i_mapping) ||
2371 	    (page_offset(page) > size)) {
2372 		/* page got truncated out from underneath us */
2373 		goto out_unlock;
2374 	}
2375 
2376 	/* page is wholly or partially inside EOF */
2377 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2378 		end = size & ~PAGE_CACHE_MASK;
2379 	else
2380 		end = PAGE_CACHE_SIZE;
2381 
2382 	ret = block_prepare_write(page, 0, end, get_block);
2383 	if (!ret)
2384 		ret = block_commit_write(page, 0, end);
2385 
2386 out_unlock:
2387 	unlock_page(page);
2388 	return ret;
2389 }
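
/*
 * Illustrative sketch, not part of the original source: with the
 * ->page_mkwrite prototype of this era a filesystem hook is simply a
 * wrapper (myfs_get_block is hypothetical):
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct page *page)
 *	{
 *		return block_page_mkwrite(vma, page, myfs_get_block);
 *	}
 */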
2390 
2391 /*
2392  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2393  * immediately, while under the page lock.  So it needs a special end_io
2394  * handler which does not touch the bh after unlocking it.
2395  */
2396 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2397 {
2398 	__end_buffer_read_notouch(bh, uptodate);
2399 }
2400 
2401 /*
2402  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2403  * the page (converting it to circular linked list and taking care of page
2404  * dirty races).
2405  */
2406 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2407 {
2408 	struct buffer_head *bh;
2409 
2410 	BUG_ON(!PageLocked(page));
2411 
2412 	spin_lock(&page->mapping->private_lock);
2413 	bh = head;
2414 	do {
2415 		if (PageDirty(page))
2416 			set_buffer_dirty(bh);
2417 		if (!bh->b_this_page)
2418 			bh->b_this_page = head;
2419 		bh = bh->b_this_page;
2420 	} while (bh != head);
2421 	attach_page_buffers(page, head);
2422 	spin_unlock(&page->mapping->private_lock);
2423 }
2424 
2425 /*
2426  * On entry, the page is fully not uptodate.
2427  * On exit the page is fully uptodate in the areas outside (from,to)
2428  */
2429 int nobh_write_begin(struct file *file, struct address_space *mapping,
2430 			loff_t pos, unsigned len, unsigned flags,
2431 			struct page **pagep, void **fsdata,
2432 			get_block_t *get_block)
2433 {
2434 	struct inode *inode = mapping->host;
2435 	const unsigned blkbits = inode->i_blkbits;
2436 	const unsigned blocksize = 1 << blkbits;
2437 	struct buffer_head *head, *bh;
2438 	struct page *page;
2439 	pgoff_t index;
2440 	unsigned from, to;
2441 	unsigned block_in_page;
2442 	unsigned block_start, block_end;
2443 	sector_t block_in_file;
2444 	int nr_reads = 0;
2445 	int ret = 0;
2446 	int is_mapped_to_disk = 1;
2447 
2448 	index = pos >> PAGE_CACHE_SHIFT;
2449 	from = pos & (PAGE_CACHE_SIZE - 1);
2450 	to = from + len;
2451 
2452 	page = __grab_cache_page(mapping, index);
2453 	if (!page)
2454 		return -ENOMEM;
2455 	*pagep = page;
2456 	*fsdata = NULL;
2457 
2458 	if (page_has_buffers(page)) {
2459 		unlock_page(page);
2460 		page_cache_release(page);
2461 		*pagep = NULL;
2462 		return block_write_begin(file, mapping, pos, len, flags, pagep,
2463 					fsdata, get_block);
2464 	}
2465 
2466 	if (PageMappedToDisk(page))
2467 		return 0;
2468 
2469 	/*
2470 	 * Allocate buffers so that we can keep track of state, and potentially
2471 	 * attach them to the page if an error occurs. In the common case of
2472 	 * no error, they will just be freed again without ever being attached
2473 	 * to the page (which is all OK, because we're under the page lock).
2474 	 *
2475 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2476 	 * than the circular one we're used to.
2477 	 */
2478 	head = alloc_page_buffers(page, blocksize, 0);
2479 	if (!head) {
2480 		ret = -ENOMEM;
2481 		goto out_release;
2482 	}
2483 
2484 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2485 
2486 	/*
2487 	 * We loop across all blocks in the page, whether or not they are
2488 	 * part of the affected region.  This is so we can discover if the
2489 	 * page is fully mapped-to-disk.
2490 	 */
2491 	for (block_start = 0, block_in_page = 0, bh = head;
2492 		  block_start < PAGE_CACHE_SIZE;
2493 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2494 		int create;
2495 
2496 		block_end = block_start + blocksize;
2497 		bh->b_state = 0;
2498 		create = 1;
2499 		if (block_start >= to)
2500 			create = 0;
2501 		ret = get_block(inode, block_in_file + block_in_page,
2502 					bh, create);
2503 		if (ret)
2504 			goto failed;
2505 		if (!buffer_mapped(bh))
2506 			is_mapped_to_disk = 0;
2507 		if (buffer_new(bh))
2508 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2509 		if (PageUptodate(page)) {
2510 			set_buffer_uptodate(bh);
2511 			continue;
2512 		}
2513 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2514 			zero_user_segments(page, block_start, from,
2515 							to, block_end);
2516 			continue;
2517 		}
2518 		if (buffer_uptodate(bh))
2519 			continue;	/* reiserfs does this */
2520 		if (block_start < from || block_end > to) {
2521 			lock_buffer(bh);
2522 			bh->b_end_io = end_buffer_read_nobh;
2523 			submit_bh(READ, bh);
2524 			nr_reads++;
2525 		}
2526 	}
2527 
2528 	if (nr_reads) {
2529 		/*
2530 		 * The page is locked, so these buffers are protected from
2531 		 * any VM or truncate activity.  Hence we don't need to care
2532 		 * for the buffer_head refcounts.
2533 		 */
2534 		for (bh = head; bh; bh = bh->b_this_page) {
2535 			wait_on_buffer(bh);
2536 			if (!buffer_uptodate(bh))
2537 				ret = -EIO;
2538 		}
2539 		if (ret)
2540 			goto failed;
2541 	}
2542 
2543 	if (is_mapped_to_disk)
2544 		SetPageMappedToDisk(page);
2545 
2546 	*fsdata = head; /* to be released by nobh_write_end */
2547 
2548 	return 0;
2549 
2550 failed:
2551 	BUG_ON(!ret);
2552 	/*
2553 	 * Error recovery is a bit difficult. We need to zero out blocks that
2554 	 * were newly allocated, and dirty them to ensure they get written out.
2555 	 * Buffers need to be attached to the page at this point, otherwise
2556 	 * the handling of potential IO errors during writeout would be hard
2557 	 * (could try doing synchronous writeout, but what if that fails too?)
2558 	 */
2559 	attach_nobh_buffers(page, head);
2560 	page_zero_new_buffers(page, from, to);
2561 
2562 out_release:
2563 	unlock_page(page);
2564 	page_cache_release(page);
2565 	*pagep = NULL;
2566 
2567 	if (pos + len > inode->i_size)
2568 		vmtruncate(inode, inode->i_size);
2569 
2570 	return ret;
2571 }
2572 EXPORT_SYMBOL(nobh_write_begin);
2573 
2574 int nobh_write_end(struct file *file, struct address_space *mapping,
2575 			loff_t pos, unsigned len, unsigned copied,
2576 			struct page *page, void *fsdata)
2577 {
2578 	struct inode *inode = page->mapping->host;
2579 	struct buffer_head *head = fsdata;
2580 	struct buffer_head *bh;
2581 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2582 
2583 	if (unlikely(copied < len) && !page_has_buffers(page))
2584 		attach_nobh_buffers(page, head);
2585 	if (page_has_buffers(page))
2586 		return generic_write_end(file, mapping, pos, len,
2587 					copied, page, fsdata);
2588 
2589 	SetPageUptodate(page);
2590 	set_page_dirty(page);
2591 	if (pos+copied > inode->i_size) {
2592 		i_size_write(inode, pos+copied);
2593 		mark_inode_dirty(inode);
2594 	}
2595 
2596 	unlock_page(page);
2597 	page_cache_release(page);
2598 
2599 	while (head) {
2600 		bh = head;
2601 		head = head->b_this_page;
2602 		free_buffer_head(bh);
2603 	}
2604 
2605 	return copied;
2606 }
2607 EXPORT_SYMBOL(nobh_write_end);
2608 
2609 /*
2610  * nobh_writepage() - based on block_write_full_page() except
2611  * that it tries to operate without attaching bufferheads to
2612  * the page.
2613  */
2614 int nobh_writepage(struct page *page, get_block_t *get_block,
2615 			struct writeback_control *wbc)
2616 {
2617 	struct inode * const inode = page->mapping->host;
2618 	loff_t i_size = i_size_read(inode);
2619 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2620 	unsigned offset;
2621 	int ret;
2622 
2623 	/* Is the page fully inside i_size? */
2624 	if (page->index < end_index)
2625 		goto out;
2626 
2627 	/* Is the page fully outside i_size? (truncate in progress) */
2628 	offset = i_size & (PAGE_CACHE_SIZE-1);
2629 	if (page->index >= end_index+1 || !offset) {
2630 		/*
2631 		 * The page may have dirty, unmapped buffers.  For example,
2632 		 * they may have been added in ext3_writepage().  Make them
2633 		 * freeable here, so the page does not leak.
2634 		 */
2635 #if 0
2636 		/* Not really sure about this - do we need this? */
2637 		if (page->mapping->a_ops->invalidatepage)
2638 			page->mapping->a_ops->invalidatepage(page, offset);
2639 #endif
2640 		unlock_page(page);
2641 		return 0; /* don't care */
2642 	}
2643 
2644 	/*
2645 	 * The page straddles i_size.  It must be zeroed out on each and every
2646 	 * writepage invocation because it may be mmapped.  "A file is mapped
2647 	 * in multiples of the page size.  For a file that is not a multiple of
2648 	 * the  page size, the remaining memory is zeroed when mapped, and
2649 	 * writes to that region are not written out to the file."
2650 	 */
2651 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2652 out:
2653 	ret = mpage_writepage(page, get_block, wbc);
2654 	if (ret == -EAGAIN)
2655 		ret = __block_write_full_page(inode, page, get_block, wbc);
2656 	return ret;
2657 }
2658 EXPORT_SYMBOL(nobh_writepage);
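
/*
 * Illustrative sketch, not part of the original source: the nobh entry
 * points are meant to be used together, e.g. in a hypothetical set of
 * address_space_operations for a filesystem mounted without buffer heads:
 *
 *	static const struct address_space_operations myfs_nobh_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_nobh_writepage,
 *		.write_begin	= myfs_nobh_write_begin,
 *		.write_end	= nobh_write_end,
 *	};
 *
 * where myfs_nobh_write_begin() and myfs_nobh_writepage() are thin
 * wrappers that pass the filesystem's get_block callback to
 * nobh_write_begin() and nobh_writepage() respectively.
 */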
2659 
2660 int nobh_truncate_page(struct address_space *mapping,
2661 			loff_t from, get_block_t *get_block)
2662 {
2663 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2664 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2665 	unsigned blocksize;
2666 	sector_t iblock;
2667 	unsigned length, pos;
2668 	struct inode *inode = mapping->host;
2669 	struct page *page;
2670 	struct buffer_head map_bh;
2671 	int err;
2672 
2673 	blocksize = 1 << inode->i_blkbits;
2674 	length = offset & (blocksize - 1);
2675 
2676 	/* Block boundary? Nothing to do */
2677 	if (!length)
2678 		return 0;
2679 
2680 	length = blocksize - length;
2681 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2682 
2683 	page = grab_cache_page(mapping, index);
2684 	err = -ENOMEM;
2685 	if (!page)
2686 		goto out;
2687 
2688 	if (page_has_buffers(page)) {
2689 has_buffers:
2690 		unlock_page(page);
2691 		page_cache_release(page);
2692 		return block_truncate_page(mapping, from, get_block);
2693 	}
2694 
2695 	/* Find the buffer that contains "offset" */
2696 	pos = blocksize;
2697 	while (offset >= pos) {
2698 		iblock++;
2699 		pos += blocksize;
2700 	}
2701 
2702 	err = get_block(inode, iblock, &map_bh, 0);
2703 	if (err)
2704 		goto unlock;
2705 	/* unmapped? It's a hole - nothing to do */
2706 	if (!buffer_mapped(&map_bh))
2707 		goto unlock;
2708 
2709 	/* Ok, it's mapped. Make sure it's up-to-date */
2710 	if (!PageUptodate(page)) {
2711 		err = mapping->a_ops->readpage(NULL, page);
2712 		if (err) {
2713 			page_cache_release(page);
2714 			goto out;
2715 		}
2716 		lock_page(page);
2717 		if (!PageUptodate(page)) {
2718 			err = -EIO;
2719 			goto unlock;
2720 		}
2721 		if (page_has_buffers(page))
2722 			goto has_buffers;
2723 	}
2724 	zero_user(page, offset, length);
2725 	set_page_dirty(page);
2726 	err = 0;
2727 
2728 unlock:
2729 	unlock_page(page);
2730 	page_cache_release(page);
2731 out:
2732 	return err;
2733 }
2734 EXPORT_SYMBOL(nobh_truncate_page);
2735 
2736 int block_truncate_page(struct address_space *mapping,
2737 			loff_t from, get_block_t *get_block)
2738 {
2739 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2740 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2741 	unsigned blocksize;
2742 	sector_t iblock;
2743 	unsigned length, pos;
2744 	struct inode *inode = mapping->host;
2745 	struct page *page;
2746 	struct buffer_head *bh;
2747 	int err;
2748 
2749 	blocksize = 1 << inode->i_blkbits;
2750 	length = offset & (blocksize - 1);
2751 
2752 	/* Block boundary? Nothing to do */
2753 	if (!length)
2754 		return 0;
2755 
2756 	length = blocksize - length;
2757 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2758 
2759 	page = grab_cache_page(mapping, index);
2760 	err = -ENOMEM;
2761 	if (!page)
2762 		goto out;
2763 
2764 	if (!page_has_buffers(page))
2765 		create_empty_buffers(page, blocksize, 0);
2766 
2767 	/* Find the buffer that contains "offset" */
2768 	bh = page_buffers(page);
2769 	pos = blocksize;
2770 	while (offset >= pos) {
2771 		bh = bh->b_this_page;
2772 		iblock++;
2773 		pos += blocksize;
2774 	}
2775 
2776 	err = 0;
2777 	if (!buffer_mapped(bh)) {
2778 		WARN_ON(bh->b_size != blocksize);
2779 		err = get_block(inode, iblock, bh, 0);
2780 		if (err)
2781 			goto unlock;
2782 		/* unmapped? It's a hole - nothing to do */
2783 		if (!buffer_mapped(bh))
2784 			goto unlock;
2785 	}
2786 
2787 	/* Ok, it's mapped. Make sure it's up-to-date */
2788 	if (PageUptodate(page))
2789 		set_buffer_uptodate(bh);
2790 
2791 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2792 		err = -EIO;
2793 		ll_rw_block(READ, 1, &bh);
2794 		wait_on_buffer(bh);
2795 		/* Uhhuh. Read error. Complain and punt. */
2796 		if (!buffer_uptodate(bh))
2797 			goto unlock;
2798 	}
2799 
2800 	zero_user(page, offset, length);
2801 	mark_buffer_dirty(bh);
2802 	err = 0;
2803 
2804 unlock:
2805 	unlock_page(page);
2806 	page_cache_release(page);
2807 out:
2808 	return err;
2809 }
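
/*
 * Illustrative sketch, not part of the original source: the usual caller
 * is a filesystem's truncate path, which zeroes the partial block at the
 * new EOF before releasing the blocks beyond it (myfs_get_block is
 * hypothetical):
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  myfs_get_block);
 */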
2810 
2811 /*
2812  * The generic ->writepage function for buffer-backed address_spaces
2813  */
2814 int block_write_full_page(struct page *page, get_block_t *get_block,
2815 			struct writeback_control *wbc)
2816 {
2817 	struct inode * const inode = page->mapping->host;
2818 	loff_t i_size = i_size_read(inode);
2819 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2820 	unsigned offset;
2821 
2822 	/* Is the page fully inside i_size? */
2823 	if (page->index < end_index)
2824 		return __block_write_full_page(inode, page, get_block, wbc);
2825 
2826 	/* Is the page fully outside i_size? (truncate in progress) */
2827 	offset = i_size & (PAGE_CACHE_SIZE-1);
2828 	if (page->index >= end_index+1 || !offset) {
2829 		/*
2830 		 * The page may have dirty, unmapped buffers.  For example,
2831 		 * they may have been added in ext3_writepage().  Make them
2832 		 * freeable here, so the page does not leak.
2833 		 */
2834 		do_invalidatepage(page, 0);
2835 		unlock_page(page);
2836 		return 0; /* don't care */
2837 	}
2838 
2839 	/*
2840 	 * The page straddles i_size.  It must be zeroed out on each and every
2841 	 * writepage invocation because it may be mmapped.  "A file is mapped
2842 	 * in multiples of the page size.  For a file that is not a multiple of
2843 	 * the  page size, the remaining memory is zeroed when mapped, and
2844 	 * writes to that region are not written out to the file."
2845 	 */
2846 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2847 	return __block_write_full_page(inode, page, get_block, wbc);
2848 }
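
/*
 * Illustrative sketch, not part of the original source: the usual
 * ->writepage for a buffer-backed filesystem is a thin wrapper
 * (myfs_get_block is hypothetical):
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */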
2849 
2850 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2851 			    get_block_t *get_block)
2852 {
2853 	struct buffer_head tmp;
2854 	struct inode *inode = mapping->host;
2855 	tmp.b_state = 0;
2856 	tmp.b_blocknr = 0;
2857 	tmp.b_size = 1 << inode->i_blkbits;
2858 	get_block(inode, block, &tmp, 0);
2859 	return tmp.b_blocknr;
2860 }
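
/*
 * Illustrative sketch, not part of the original source: ->bmap hooks are
 * one-line wrappers around generic_block_bmap() (myfs_get_block is
 * hypothetical):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */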
2861 
2862 static void end_bio_bh_io_sync(struct bio *bio, int err)
2863 {
2864 	struct buffer_head *bh = bio->bi_private;
2865 
2866 	if (err == -EOPNOTSUPP) {
2867 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2868 		set_bit(BH_Eopnotsupp, &bh->b_state);
2869 	}
2870 
2871 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2872 	bio_put(bio);
2873 }
2874 
2875 int submit_bh(int rw, struct buffer_head * bh)
2876 {
2877 	struct bio *bio;
2878 	int ret = 0;
2879 
2880 	BUG_ON(!buffer_locked(bh));
2881 	BUG_ON(!buffer_mapped(bh));
2882 	BUG_ON(!bh->b_end_io);
2883 
2884 	if (buffer_ordered(bh) && (rw == WRITE))
2885 		rw = WRITE_BARRIER;
2886 
2887 	/*
2888 	 * Only clear out a write error when rewriting, should this
2889 	 * include WRITE_SYNC as well?
2890 	 */
2891 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2892 		clear_buffer_write_io_error(bh);
2893 
2894 	/*
2895 	 * from here on down, it's all bio -- do the initial mapping,
2896 	 * submit_bio -> generic_make_request may further map this bio around
2897 	 */
2898 	bio = bio_alloc(GFP_NOIO, 1);
2899 
2900 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2901 	bio->bi_bdev = bh->b_bdev;
2902 	bio->bi_io_vec[0].bv_page = bh->b_page;
2903 	bio->bi_io_vec[0].bv_len = bh->b_size;
2904 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2905 
2906 	bio->bi_vcnt = 1;
2907 	bio->bi_idx = 0;
2908 	bio->bi_size = bh->b_size;
2909 
2910 	bio->bi_end_io = end_bio_bh_io_sync;
2911 	bio->bi_private = bh;
2912 
2913 	bio_get(bio);
2914 	submit_bio(rw, bio);
2915 
2916 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2917 		ret = -EOPNOTSUPP;
2918 
2919 	bio_put(bio);
2920 	return ret;
2921 }
2922 
2923 /**
2924  * ll_rw_block: low-level access to block devices (DEPRECATED)
2925  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2926  * @nr: number of &struct buffer_heads in the array
2927  * @bhs: array of pointers to &struct buffer_head
2928  *
2929  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2930  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2931  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2932  * are sent to disk. The fourth %READA option is described in the documentation
2933  * for generic_make_request() which ll_rw_block() calls.
2934  *
2935  * This function drops any buffer that it cannot get a lock on (with the
2936  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2937  * clean when doing a write request, and any buffer that appears to be
2938  * up-to-date when doing read request.  Further it marks as clean buffers that
2939  * are processed for writing (the buffer cache won't assume that they are
2940  * actually clean until the buffer gets unlocked).
2941  *
2942  * ll_rw_block sets b_end_io to a simple completion handler that marks
2943  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2944  * any waiters.
2945  *
2946  * All of the buffers must be for the same device, and must also be a
2947  * multiple of the current approved size for the device.
2948  */
2949 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2950 {
2951 	int i;
2952 
2953 	for (i = 0; i < nr; i++) {
2954 		struct buffer_head *bh = bhs[i];
2955 
2956 		if (rw == SWRITE || rw == SWRITE_SYNC)
2957 			lock_buffer(bh);
2958 		else if (test_set_buffer_locked(bh))
2959 			continue;
2960 
2961 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
2962 			if (test_clear_buffer_dirty(bh)) {
2963 				bh->b_end_io = end_buffer_write_sync;
2964 				get_bh(bh);
2965 				if (rw == SWRITE_SYNC)
2966 					submit_bh(WRITE_SYNC, bh);
2967 				else
2968 					submit_bh(WRITE, bh);
2969 				continue;
2970 			}
2971 		} else {
2972 			if (!buffer_uptodate(bh)) {
2973 				bh->b_end_io = end_buffer_read_sync;
2974 				get_bh(bh);
2975 				submit_bh(rw, bh);
2976 				continue;
2977 			}
2978 		}
2979 		unlock_buffer(bh);
2980 	}
2981 }
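
/*
 * Illustrative sketch, not part of the original source: the common calling
 * pattern is to kick off the read and then wait on the buffer, checking
 * the uptodate bit afterwards:
 *
 *	ll_rw_block(READ, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */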
2982 
2983 /*
2984  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2985  * and then start new I/O and then wait upon it.  The caller must have a ref on
2986  * the buffer_head.
2987  */
2988 int sync_dirty_buffer(struct buffer_head *bh)
2989 {
2990 	int ret = 0;
2991 
2992 	WARN_ON(atomic_read(&bh->b_count) < 1);
2993 	lock_buffer(bh);
2994 	if (test_clear_buffer_dirty(bh)) {
2995 		get_bh(bh);
2996 		bh->b_end_io = end_buffer_write_sync;
2997 		ret = submit_bh(WRITE_SYNC, bh);
2998 		wait_on_buffer(bh);
2999 		if (buffer_eopnotsupp(bh)) {
3000 			clear_buffer_eopnotsupp(bh);
3001 			ret = -EOPNOTSUPP;
3002 		}
3003 		if (!ret && !buffer_uptodate(bh))
3004 			ret = -EIO;
3005 	} else {
3006 		unlock_buffer(bh);
3007 	}
3008 	return ret;
3009 }
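
/*
 * Illustrative sketch, not part of the original source: a synchronous
 * metadata update (a superblock write, say) typically looks like this,
 * with bh already read and the offset/data names purely hypothetical:
 *
 *	memcpy(bh->b_data + offset, data, len);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 */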
3010 
3011 /*
3012  * try_to_free_buffers() checks if all the buffers on this particular page
3013  * are unused, and releases them if so.
3014  *
3015  * Exclusion against try_to_free_buffers may be obtained by either
3016  * locking the page or by holding its mapping's private_lock.
3017  *
3018  * If the page is dirty but all the buffers are clean then we need to
3019  * be sure to mark the page clean as well.  This is because the page
3020  * may be against a block device, and a later reattachment of buffers
3021  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3022  * filesystem data on the same device.
3023  *
3024  * The same applies to regular filesystem pages: if all the buffers are
3025  * clean then we set the page clean and proceed.  To do that, we require
3026  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3027  * private_lock.
3028  *
3029  * try_to_free_buffers() is non-blocking.
3030  */
3031 static inline int buffer_busy(struct buffer_head *bh)
3032 {
3033 	return atomic_read(&bh->b_count) |
3034 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3035 }
3036 
3037 static int
3038 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3039 {
3040 	struct buffer_head *head = page_buffers(page);
3041 	struct buffer_head *bh;
3042 
3043 	bh = head;
3044 	do {
3045 		if (buffer_write_io_error(bh) && page->mapping)
3046 			set_bit(AS_EIO, &page->mapping->flags);
3047 		if (buffer_busy(bh))
3048 			goto failed;
3049 		bh = bh->b_this_page;
3050 	} while (bh != head);
3051 
3052 	do {
3053 		struct buffer_head *next = bh->b_this_page;
3054 
3055 		if (bh->b_assoc_map)
3056 			__remove_assoc_queue(bh);
3057 		bh = next;
3058 	} while (bh != head);
3059 	*buffers_to_free = head;
3060 	__clear_page_buffers(page);
3061 	return 1;
3062 failed:
3063 	return 0;
3064 }
3065 
3066 int try_to_free_buffers(struct page *page)
3067 {
3068 	struct address_space * const mapping = page->mapping;
3069 	struct buffer_head *buffers_to_free = NULL;
3070 	int ret = 0;
3071 
3072 	BUG_ON(!PageLocked(page));
3073 	if (PageWriteback(page))
3074 		return 0;
3075 
3076 	if (mapping == NULL) {		/* can this still happen? */
3077 		ret = drop_buffers(page, &buffers_to_free);
3078 		goto out;
3079 	}
3080 
3081 	spin_lock(&mapping->private_lock);
3082 	ret = drop_buffers(page, &buffers_to_free);
3083 
3084 	/*
3085 	 * If the filesystem writes its buffers by hand (eg ext3)
3086 	 * then we can have clean buffers against a dirty page.  We
3087 	 * clean the page here; otherwise the VM will never notice
3088 	 * that the filesystem did any IO at all.
3089 	 *
3090 	 * Also, during truncate, discard_buffer will have marked all
3091 	 * the page's buffers clean.  We discover that here and clean
3092 	 * the page also.
3093 	 *
3094 	 * private_lock must be held over this entire operation in order
3095 	 * to synchronise against __set_page_dirty_buffers and prevent the
3096 	 * dirty bit from being lost.
3097 	 */
3098 	if (ret)
3099 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3100 	spin_unlock(&mapping->private_lock);
3101 out:
3102 	if (buffers_to_free) {
3103 		struct buffer_head *bh = buffers_to_free;
3104 
3105 		do {
3106 			struct buffer_head *next = bh->b_this_page;
3107 			free_buffer_head(bh);
3108 			bh = next;
3109 		} while (bh != buffers_to_free);
3110 	}
3111 	return ret;
3112 }
3113 EXPORT_SYMBOL(try_to_free_buffers);
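
/*
 * Illustrative sketch, not part of the original source: a filesystem that
 * provides its own ->releasepage typically drops its private references
 * and then delegates to try_to_free_buffers(); myfs_page_busy() is a
 * hypothetical helper:
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		if (myfs_page_busy(page))
 *			return 0;
 *		return try_to_free_buffers(page);
 *	}
 */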
3114 
3115 void block_sync_page(struct page *page)
3116 {
3117 	struct address_space *mapping;
3118 
3119 	smp_mb();
3120 	mapping = page_mapping(page);
3121 	if (mapping)
3122 		blk_run_backing_dev(mapping->backing_dev_info, page);
3123 }
3124 
3125 /*
3126  * There are no bdflush tunables left.  But distributions are
3127  * still running obsolete flush daemons, so we terminate them here.
3128  *
3129  * Use of bdflush() is deprecated and will be removed in a future kernel.
3130  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3131  */
3132 asmlinkage long sys_bdflush(int func, long data)
3133 {
3134 	static int msg_count;
3135 
3136 	if (!capable(CAP_SYS_ADMIN))
3137 		return -EPERM;
3138 
3139 	if (msg_count < 5) {
3140 		msg_count++;
3141 		printk(KERN_INFO
3142 			"warning: process `%s' used the obsolete bdflush"
3143 			" system call\n", current->comm);
3144 		printk(KERN_INFO "Fix your initscripts?\n");
3145 	}
3146 
3147 	if (func == 1)
3148 		do_exit(0);
3149 	return 0;
3150 }
3151 
3152 /*
3153  * Buffer-head allocation
3154  */
3155 static struct kmem_cache *bh_cachep;
3156 
3157 /*
3158  * Once the number of bh's in the machine exceeds this level, we start
3159  * stripping them in writeback.
3160  */
3161 static int max_buffer_heads;
3162 
3163 int buffer_heads_over_limit;
3164 
3165 struct bh_accounting {
3166 	int nr;			/* Number of live bh's */
3167 	int ratelimit;		/* Limit cacheline bouncing */
3168 };
3169 
3170 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3171 
3172 static void recalc_bh_state(void)
3173 {
3174 	int i;
3175 	int tot = 0;
3176 
3177 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3178 		return;
3179 	__get_cpu_var(bh_accounting).ratelimit = 0;
3180 	for_each_online_cpu(i)
3181 		tot += per_cpu(bh_accounting, i).nr;
3182 	buffer_heads_over_limit = (tot > max_buffer_heads);
3183 }
3184 
3185 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3186 {
3187 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3188 	if (ret) {
3189 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3190 		get_cpu_var(bh_accounting).nr++;
3191 		recalc_bh_state();
3192 		put_cpu_var(bh_accounting);
3193 	}
3194 	return ret;
3195 }
3196 EXPORT_SYMBOL(alloc_buffer_head);
3197 
3198 void free_buffer_head(struct buffer_head *bh)
3199 {
3200 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3201 	kmem_cache_free(bh_cachep, bh);
3202 	get_cpu_var(bh_accounting).nr--;
3203 	recalc_bh_state();
3204 	put_cpu_var(bh_accounting);
3205 }
3206 EXPORT_SYMBOL(free_buffer_head);
3207 
3208 static void buffer_exit_cpu(int cpu)
3209 {
3210 	int i;
3211 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3212 
3213 	for (i = 0; i < BH_LRU_SIZE; i++) {
3214 		brelse(b->bhs[i]);
3215 		b->bhs[i] = NULL;
3216 	}
3217 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3218 	per_cpu(bh_accounting, cpu).nr = 0;
3219 	put_cpu_var(bh_accounting);
3220 }
3221 
3222 static int buffer_cpu_notify(struct notifier_block *self,
3223 			      unsigned long action, void *hcpu)
3224 {
3225 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3226 		buffer_exit_cpu((unsigned long)hcpu);
3227 	return NOTIFY_OK;
3228 }
3229 
3230 /**
3231  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3232  * @bh: struct buffer_head
3233  *
3234  * Return true if the buffer is up-to-date; otherwise return false
3235  * with the buffer locked.
3236  */
3237 int bh_uptodate_or_lock(struct buffer_head *bh)
3238 {
3239 	if (!buffer_uptodate(bh)) {
3240 		lock_buffer(bh);
3241 		if (!buffer_uptodate(bh))
3242 			return 0;
3243 		unlock_buffer(bh);
3244 	}
3245 	return 1;
3246 }
3247 EXPORT_SYMBOL(bh_uptodate_or_lock);
3248 
3249 /**
3250  * bh_submit_read - Submit a locked buffer for reading
3251  * @bh: struct buffer_head
3252  *
3253  * Returns zero on success and -EIO on error.
3254  */
3255 int bh_submit_read(struct buffer_head *bh)
3256 {
3257 	BUG_ON(!buffer_locked(bh));
3258 
3259 	if (buffer_uptodate(bh)) {
3260 		unlock_buffer(bh);
3261 		return 0;
3262 	}
3263 
3264 	get_bh(bh);
3265 	bh->b_end_io = end_buffer_read_sync;
3266 	submit_bh(READ, bh);
3267 	wait_on_buffer(bh);
3268 	if (buffer_uptodate(bh))
3269 		return 0;
3270 	return -EIO;
3271 }
3272 EXPORT_SYMBOL(bh_submit_read);
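
/*
 * Illustrative sketch, not part of the original source: the two helpers
 * above are designed to be used back to back when reading metadata that
 * may already be cached:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);
 *		if (err)
 *			return err;
 *	}
 */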
3273 
3274 static void
3275 init_buffer_head(struct kmem_cache *cachep, void *data)
3276 {
3277 	struct buffer_head *bh = data;
3278 
3279 	memset(bh, 0, sizeof(*bh));
3280 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3281 }
3282 
3283 void __init buffer_init(void)
3284 {
3285 	int nrpages;
3286 
3287 	bh_cachep = kmem_cache_create("buffer_head",
3288 			sizeof(struct buffer_head), 0,
3289 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3290 				SLAB_MEM_SPREAD),
3291 				init_buffer_head);
3292 
3293 	/*
3294 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3295 	 */
3296 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3297 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3298 	hotcpu_notifier(buffer_cpu_notify, 0);
3299 }
3300 
3301 EXPORT_SYMBOL(__bforget);
3302 EXPORT_SYMBOL(__brelse);
3303 EXPORT_SYMBOL(__wait_on_buffer);
3304 EXPORT_SYMBOL(block_commit_write);
3305 EXPORT_SYMBOL(block_prepare_write);
3306 EXPORT_SYMBOL(block_page_mkwrite);
3307 EXPORT_SYMBOL(block_read_full_page);
3308 EXPORT_SYMBOL(block_sync_page);
3309 EXPORT_SYMBOL(block_truncate_page);
3310 EXPORT_SYMBOL(block_write_full_page);
3311 EXPORT_SYMBOL(cont_write_begin);
3312 EXPORT_SYMBOL(end_buffer_read_sync);
3313 EXPORT_SYMBOL(end_buffer_write_sync);
3314 EXPORT_SYMBOL(file_fsync);
3315 EXPORT_SYMBOL(fsync_bdev);
3316 EXPORT_SYMBOL(generic_block_bmap);
3317 EXPORT_SYMBOL(generic_cont_expand_simple);
3318 EXPORT_SYMBOL(init_buffer);
3319 EXPORT_SYMBOL(invalidate_bdev);
3320 EXPORT_SYMBOL(ll_rw_block);
3321 EXPORT_SYMBOL(mark_buffer_dirty);
3322 EXPORT_SYMBOL(submit_bh);
3323 EXPORT_SYMBOL(sync_dirty_buffer);
3324 EXPORT_SYMBOL(unlock_buffer);
3325