xref: /openbmc/linux/fs/buffer.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 
56 static int sync_buffer(void *word)
57 {
58 	struct block_device *bd;
59 	struct buffer_head *bh
60 		= container_of(word, struct buffer_head, b_state);
61 
62 	smp_mb();
63 	bd = bh->b_bdev;
64 	if (bd)
65 		blk_run_address_space(bd->bd_inode->i_mapping);
66 	io_schedule();
67 	return 0;
68 }
69 
70 void fastcall __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
77 void fastcall unlock_buffer(struct buffer_head *bh)
78 {
79 	smp_mb__before_clear_bit();
80 	clear_buffer_locked(bh);
81 	smp_mb__after_clear_bit();
82 	wake_up_bit(&bh->b_state, BH_Lock);
83 }
84 
85 /*
86  * Block until a buffer comes unlocked.  This doesn't stop it
87  * from becoming locked again - you have to lock it yourself
88  * if you want to preserve its state.
89  */
90 void __wait_on_buffer(struct buffer_head * bh)
91 {
92 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93 }
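
/*
 * Callers that care about the buffer's contents therefore re-check its
 * state after waiting - a sketch of the common pattern (it is also what
 * __bread_slow() further down does after submitting its read):
 *
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */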
94 
95 static void
96 __clear_page_buffers(struct page *page)
97 {
98 	ClearPagePrivate(page);
99 	set_page_private(page, 0);
100 	page_cache_release(page);
101 }
102 
103 static void buffer_io_error(struct buffer_head *bh)
104 {
105 	char b[BDEVNAME_SIZE];
106 
107 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 			bdevname(bh->b_bdev, b),
109 			(unsigned long long)bh->b_blocknr);
110 }
111 
112 /*
113  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
114  * unlock the buffer. This is what ll_rw_block uses too.
115  */
116 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
117 {
118 	if (uptodate) {
119 		set_buffer_uptodate(bh);
120 	} else {
121 		/* This happens due to failed READA attempts. */
122 		clear_buffer_uptodate(bh);
123 	}
124 	unlock_buffer(bh);
125 	put_bh(bh);
126 }
127 
128 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
129 {
130 	char b[BDEVNAME_SIZE];
131 
132 	if (uptodate) {
133 		set_buffer_uptodate(bh);
134 	} else {
135 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
136 			buffer_io_error(bh);
137 			printk(KERN_WARNING "lost page write due to "
138 					"I/O error on %s\n",
139 				       bdevname(bh->b_bdev, b));
140 		}
141 		set_buffer_write_io_error(bh);
142 		clear_buffer_uptodate(bh);
143 	}
144 	unlock_buffer(bh);
145 	put_bh(bh);
146 }
147 
148 /*
149  * Write out and wait upon all the dirty data associated with a block
150  * device via its mapping.  Does not take the superblock lock.
151  */
152 int sync_blockdev(struct block_device *bdev)
153 {
154 	int ret = 0;
155 
156 	if (bdev)
157 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
158 	return ret;
159 }
160 EXPORT_SYMBOL(sync_blockdev);
161 
162 /*
163  * Write out and wait upon all dirty data associated with this
164  * device.   Filesystem data as well as the underlying block
165  * device.  Takes the superblock lock.
166  */
167 int fsync_bdev(struct block_device *bdev)
168 {
169 	struct super_block *sb = get_super(bdev);
170 	if (sb) {
171 		int res = fsync_super(sb);
172 		drop_super(sb);
173 		return res;
174 	}
175 	return sync_blockdev(bdev);
176 }
177 
178 /**
179  * freeze_bdev  --  lock a filesystem and force it into a consistent state
180  * @bdev:	blockdevice to lock
181  *
182  * This takes the block device bd_mount_sem to make sure no new mounts
183  * happen on bdev until thaw_bdev() is called.
184  * If a superblock is found on this device, we take the s_umount semaphore
185  * on it to make sure nobody unmounts until the snapshot creation is done.
186  */
187 struct super_block *freeze_bdev(struct block_device *bdev)
188 {
189 	struct super_block *sb;
190 
191 	down(&bdev->bd_mount_sem);
192 	sb = get_super(bdev);
193 	if (sb && !(sb->s_flags & MS_RDONLY)) {
194 		sb->s_frozen = SB_FREEZE_WRITE;
195 		smp_wmb();
196 
197 		__fsync_super(sb);
198 
199 		sb->s_frozen = SB_FREEZE_TRANS;
200 		smp_wmb();
201 
202 		sync_blockdev(sb->s_bdev);
203 
204 		if (sb->s_op->write_super_lockfs)
205 			sb->s_op->write_super_lockfs(sb);
206 	}
207 
208 	sync_blockdev(bdev);
209 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
210 }
211 EXPORT_SYMBOL(freeze_bdev);
212 
213 /**
214  * thaw_bdev  -- unlock filesystem
215  * @bdev:	blockdevice to unlock
216  * @sb:		associated superblock
217  *
218  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
219  */
220 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
221 {
222 	if (sb) {
223 		BUG_ON(sb->s_bdev != bdev);
224 
225 		if (sb->s_op->unlockfs)
226 			sb->s_op->unlockfs(sb);
227 		sb->s_frozen = SB_UNFROZEN;
228 		smp_wmb();
229 		wake_up(&sb->s_wait_unfrozen);
230 		drop_super(sb);
231 	}
232 
233 	up(&bdev->bd_mount_sem);
234 }
235 EXPORT_SYMBOL(thaw_bdev);
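
/*
 * A minimal usage sketch of the freeze/thaw pairing around a snapshot.
 * my_make_snapshot() is a hypothetical placeholder for whatever actually
 * copies the device; freeze_bdev()/thaw_bdev() above are the real API:
 *
 *	struct super_block *sb;
 *
 *	sb = freeze_bdev(bdev);
 *	my_make_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() returns with bd_mount_sem held and the filesystem forced
 * into a consistent state; thaw_bdev() must always follow to release it.
 */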
236 
237 /*
238  * Various filesystems appear to want __find_get_block to be non-blocking.
239  * But it's the page lock which protects the buffers.  To get around this,
240  * we get exclusion from try_to_free_buffers with the blockdev mapping's
241  * private_lock.
242  *
243  * Hack idea: for the blockdev mapping, private_lock contention
244  * may be quite high.  This code could TryLock the page, and if that
245  * succeeds, there is no need to take private_lock. (But if
246  * private_lock is contended then so is mapping->tree_lock).
247  */
248 static struct buffer_head *
249 __find_get_block_slow(struct block_device *bdev, sector_t block)
250 {
251 	struct inode *bd_inode = bdev->bd_inode;
252 	struct address_space *bd_mapping = bd_inode->i_mapping;
253 	struct buffer_head *ret = NULL;
254 	pgoff_t index;
255 	struct buffer_head *bh;
256 	struct buffer_head *head;
257 	struct page *page;
258 	int all_mapped = 1;
259 
260 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
261 	page = find_get_page(bd_mapping, index);
262 	if (!page)
263 		goto out;
264 
265 	spin_lock(&bd_mapping->private_lock);
266 	if (!page_has_buffers(page))
267 		goto out_unlock;
268 	head = page_buffers(page);
269 	bh = head;
270 	do {
271 		if (bh->b_blocknr == block) {
272 			ret = bh;
273 			get_bh(bh);
274 			goto out_unlock;
275 		}
276 		if (!buffer_mapped(bh))
277 			all_mapped = 0;
278 		bh = bh->b_this_page;
279 	} while (bh != head);
280 
281 	/* we might be here because some of the buffers on this page are
282 	 * not mapped.  This is due to various races between
283 	 * file io on the block device and getblk.  It gets dealt with
284 	 * elsewhere, don't buffer_error if we had some unmapped buffers
285 	 */
286 	if (all_mapped) {
287 		printk("__find_get_block_slow() failed. "
288 			"block=%llu, b_blocknr=%llu\n",
289 			(unsigned long long)block,
290 			(unsigned long long)bh->b_blocknr);
291 		printk("b_state=0x%08lx, b_size=%zu\n",
292 			bh->b_state, bh->b_size);
293 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
294 	}
295 out_unlock:
296 	spin_unlock(&bd_mapping->private_lock);
297 	page_cache_release(page);
298 out:
299 	return ret;
300 }
301 
302 /* If invalidate_buffers() will trash dirty buffers, it means some kind
303    of fs corruption is going on. Trashing dirty data always implies losing
304    information that was supposed to be just stored on the physical layer
305    by the user.
306 
307    Thus invalidate_buffers in general usage is not allowed to trash
308    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
309    be preserved.  These buffers are simply skipped.
310 
311    We also skip buffers which are still in use.  For example this can
312    happen if a userspace program is reading the block device.
313 
314    NOTE: in the case where the user removed a removable-media disk while
315    there was still dirty data not yet synced to disk (due to a bug in the
316    device driver or to an error of the user), by not destroying the dirty
317    buffers we could also corrupt the next media inserted.  Thus a parameter
318    is necessary to handle this case in the safest way possible (trying not
319    to corrupt the newly inserted disk with data belonging to the old, now
320    corrupted one).  Also, for the ramdisk, the natural way to release the
321    ramdisk memory is to destroy the dirty buffers.
322 
323    These are two special cases.  Normal usage implies that the device
324    driver issues a sync on the device (without waiting for I/O completion)
325    and then an invalidate_buffers call that doesn't trash dirty buffers.
326 
327    For handling cache coherency with the blkdev pagecache the 'update' case
328    has been introduced. It is needed to re-read from disk any pinned
329    buffer. NOTE: re-reading from disk is destructive so we can do it only
330    when we assume nobody is changing the buffercache under our I/O and when
331    we think the disk contains more recent information than the buffercache.
332    The update == 1 pass marks the buffers we need to update, the update == 2
333    pass does the actual I/O. */
334 void invalidate_bdev(struct block_device *bdev)
335 {
336 	struct address_space *mapping = bdev->bd_inode->i_mapping;
337 
338 	if (mapping->nrpages == 0)
339 		return;
340 
341 	invalidate_bh_lrus();
342 	invalidate_mapping_pages(mapping, 0, -1);
343 }
344 
345 /*
346  * Kick pdflush then try to free up some ZONE_NORMAL memory.
347  */
348 static void free_more_memory(void)
349 {
350 	struct zone **zones;
351 	pg_data_t *pgdat;
352 
353 	wakeup_pdflush(1024);
354 	yield();
355 
356 	for_each_online_pgdat(pgdat) {
357 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
358 		if (*zones)
359 			try_to_free_pages(zones, GFP_NOFS);
360 	}
361 }
362 
363 /*
364  * I/O completion handler for block_read_full_page() - pages
365  * which come unlocked at the end of I/O.
366  */
367 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
368 {
369 	unsigned long flags;
370 	struct buffer_head *first;
371 	struct buffer_head *tmp;
372 	struct page *page;
373 	int page_uptodate = 1;
374 
375 	BUG_ON(!buffer_async_read(bh));
376 
377 	page = bh->b_page;
378 	if (uptodate) {
379 		set_buffer_uptodate(bh);
380 	} else {
381 		clear_buffer_uptodate(bh);
382 		if (printk_ratelimit())
383 			buffer_io_error(bh);
384 		SetPageError(page);
385 	}
386 
387 	/*
388 	 * Be _very_ careful from here on. Bad things can happen if
389 	 * two buffer heads end IO at almost the same time and both
390 	 * decide that the page is now completely done.
391 	 */
392 	first = page_buffers(page);
393 	local_irq_save(flags);
394 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
395 	clear_buffer_async_read(bh);
396 	unlock_buffer(bh);
397 	tmp = bh;
398 	do {
399 		if (!buffer_uptodate(tmp))
400 			page_uptodate = 0;
401 		if (buffer_async_read(tmp)) {
402 			BUG_ON(!buffer_locked(tmp));
403 			goto still_busy;
404 		}
405 		tmp = tmp->b_this_page;
406 	} while (tmp != bh);
407 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
408 	local_irq_restore(flags);
409 
410 	/*
411 	 * If none of the buffers had errors and they are all
412 	 * uptodate then we can set the page uptodate.
413 	 */
414 	if (page_uptodate && !PageError(page))
415 		SetPageUptodate(page);
416 	unlock_page(page);
417 	return;
418 
419 still_busy:
420 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421 	local_irq_restore(flags);
422 	return;
423 }
424 
425 /*
426  * Completion handler for block_write_full_page() - pages which are unlocked
427  * during I/O, and which have PageWriteback cleared upon I/O completion.
428  */
429 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
430 {
431 	char b[BDEVNAME_SIZE];
432 	unsigned long flags;
433 	struct buffer_head *first;
434 	struct buffer_head *tmp;
435 	struct page *page;
436 
437 	BUG_ON(!buffer_async_write(bh));
438 
439 	page = bh->b_page;
440 	if (uptodate) {
441 		set_buffer_uptodate(bh);
442 	} else {
443 		if (printk_ratelimit()) {
444 			buffer_io_error(bh);
445 			printk(KERN_WARNING "lost page write due to "
446 					"I/O error on %s\n",
447 			       bdevname(bh->b_bdev, b));
448 		}
449 		set_bit(AS_EIO, &page->mapping->flags);
450 		set_buffer_write_io_error(bh);
451 		clear_buffer_uptodate(bh);
452 		SetPageError(page);
453 	}
454 
455 	first = page_buffers(page);
456 	local_irq_save(flags);
457 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
458 
459 	clear_buffer_async_write(bh);
460 	unlock_buffer(bh);
461 	tmp = bh->b_this_page;
462 	while (tmp != bh) {
463 		if (buffer_async_write(tmp)) {
464 			BUG_ON(!buffer_locked(tmp));
465 			goto still_busy;
466 		}
467 		tmp = tmp->b_this_page;
468 	}
469 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
470 	local_irq_restore(flags);
471 	end_page_writeback(page);
472 	return;
473 
474 still_busy:
475 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
476 	local_irq_restore(flags);
477 	return;
478 }
479 
480 /*
481  * If a page's buffers are under async read-in (end_buffer_async_read
482  * completion) then there is a possibility that another thread of
483  * control could lock one of the buffers after it has completed
484  * but while some of the other buffers have not completed.  This
485  * locked buffer would confuse end_buffer_async_read() into not unlocking
486  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
487  * that this buffer is not under async I/O.
488  *
489  * The page comes unlocked when it has no locked buffer_async buffers
490  * left.
491  *
492  * PageLocked prevents anyone from starting new async I/O reads against
493  * any of the buffers.
494  *
495  * PageWriteback is used to prevent simultaneous writeout of the same
496  * page.
497  *
498  * PageLocked prevents anyone from starting writeback of a page which is
499  * under read I/O (PageWriteback is only ever set against a locked page).
500  */
501 static void mark_buffer_async_read(struct buffer_head *bh)
502 {
503 	bh->b_end_io = end_buffer_async_read;
504 	set_buffer_async_read(bh);
505 }
506 
507 void mark_buffer_async_write(struct buffer_head *bh)
508 {
509 	bh->b_end_io = end_buffer_async_write;
510 	set_buffer_async_write(bh);
511 }
512 EXPORT_SYMBOL(mark_buffer_async_write);
513 
514 
515 /*
516  * fs/buffer.c contains helper functions for buffer-backed address space's
517  * fsync functions.  A common requirement for buffer-based filesystems is
518  * that certain data from the backing blockdev needs to be written out for
519  * a successful fsync().  For example, ext2 indirect blocks need to be
520  * written back and waited upon before fsync() returns.
521  *
522  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
523  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
524  * management of a list of dependent buffers at ->i_mapping->private_list.
525  *
526  * Locking is a little subtle: try_to_free_buffers() will remove buffers
527  * from their controlling inode's queue when they are being freed.  But
528  * try_to_free_buffers() will be operating against the *blockdev* mapping
529  * at the time, not against the S_ISREG file which depends on those buffers.
530  * So the locking for private_list is via the private_lock in the address_space
531  * which backs the buffers.  Which is different from the address_space
532  * against which the buffers are listed.  So for a particular address_space,
533  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
534  * mapping->private_list will always be protected by the backing blockdev's
535  * ->private_lock.
536  *
537  * Which introduces a requirement: all buffers on an address_space's
538  * ->private_list must be from the same address_space: the blockdev's.
539  *
540  * address_spaces which do not place buffers at ->private_list via these
541  * utility functions are free to use private_lock and private_list for
542  * whatever they want.  The only requirement is that list_empty(private_list)
543  * be true at clear_inode() time.
544  *
545  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
546  * filesystems should do that.  invalidate_inode_buffers() should just go
547  * BUG_ON(!list_empty).
548  *
549  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
550  * take an address_space, not an inode.  And it should be called
551  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
552  * queued up.
553  *
554  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
555  * list if it is already on a list.  Because if the buffer is on a list,
556  * it *must* already be on the right one.  If not, the filesystem is being
557  * silly.  This will save a ton of locking.  But first we have to ensure
558  * that buffers are taken *off* the old inode's list when they are freed
559  * (presumably in truncate).  That requires careful auditing of all
560  * filesystems (do it inside bforget()).  It could also be done by bringing
561  * b_inode back.
562  */
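
/*
 * A rough sketch of how a filesystem uses these helpers (the my_fs_*
 * names are hypothetical; ext2 treats its indirect blocks in essentially
 * this way).  While allocating metadata, queue the buffer on the inode:
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * and later, in ->fsync(), write out and wait upon everything so queued:
 *
 *	int my_fs_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */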
563 
564 /*
565  * The buffer's backing address_space's private_lock must be held
566  */
567 static inline void __remove_assoc_queue(struct buffer_head *bh)
568 {
569 	list_del_init(&bh->b_assoc_buffers);
570 	WARN_ON(!bh->b_assoc_map);
571 	if (buffer_write_io_error(bh))
572 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
573 	bh->b_assoc_map = NULL;
574 }
575 
576 int inode_has_buffers(struct inode *inode)
577 {
578 	return !list_empty(&inode->i_data.private_list);
579 }
580 
581 /*
582  * osync is designed to support O_SYNC io.  It waits synchronously for
583  * all already-submitted IO to complete, but does not queue any new
584  * writes to the disk.
585  *
586  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
587  * you dirty the buffers, and then use osync_inode_buffers to wait for
588  * completion.  Any other dirty buffers which are not yet queued for
589  * write will not be flushed to disk by the osync.
590  */
591 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
592 {
593 	struct buffer_head *bh;
594 	struct list_head *p;
595 	int err = 0;
596 
597 	spin_lock(lock);
598 repeat:
599 	list_for_each_prev(p, list) {
600 		bh = BH_ENTRY(p);
601 		if (buffer_locked(bh)) {
602 			get_bh(bh);
603 			spin_unlock(lock);
604 			wait_on_buffer(bh);
605 			if (!buffer_uptodate(bh))
606 				err = -EIO;
607 			brelse(bh);
608 			spin_lock(lock);
609 			goto repeat;
610 		}
611 	}
612 	spin_unlock(lock);
613 	return err;
614 }
615 
616 /**
617  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
618  *                        buffers
619  * @mapping: the mapping which wants those buffers written
620  *
621  * Starts I/O against the buffers at mapping->private_list, and waits upon
622  * that I/O.
623  *
624  * Basically, this is a convenience function for fsync().
625  * @mapping is a file or directory which needs those buffers to be written for
626  * a successful fsync().
627  */
628 int sync_mapping_buffers(struct address_space *mapping)
629 {
630 	struct address_space *buffer_mapping = mapping->assoc_mapping;
631 
632 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
633 		return 0;
634 
635 	return fsync_buffers_list(&buffer_mapping->private_lock,
636 					&mapping->private_list);
637 }
638 EXPORT_SYMBOL(sync_mapping_buffers);
639 
640 /*
641  * Called when we've recently written block `bblock', and it is known that
642  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
643  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
644  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
645  */
646 void write_boundary_block(struct block_device *bdev,
647 			sector_t bblock, unsigned blocksize)
648 {
649 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
650 	if (bh) {
651 		if (buffer_dirty(bh))
652 			ll_rw_block(WRITE, 1, &bh);
653 		put_bh(bh);
654 	}
655 }
656 
657 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
658 {
659 	struct address_space *mapping = inode->i_mapping;
660 	struct address_space *buffer_mapping = bh->b_page->mapping;
661 
662 	mark_buffer_dirty(bh);
663 	if (!mapping->assoc_mapping) {
664 		mapping->assoc_mapping = buffer_mapping;
665 	} else {
666 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
667 	}
668 	if (list_empty(&bh->b_assoc_buffers)) {
669 		spin_lock(&buffer_mapping->private_lock);
670 		list_move_tail(&bh->b_assoc_buffers,
671 				&mapping->private_list);
672 		bh->b_assoc_map = mapping;
673 		spin_unlock(&buffer_mapping->private_lock);
674 	}
675 }
676 EXPORT_SYMBOL(mark_buffer_dirty_inode);
677 
678 /*
679  * Add a page to the dirty page list.
680  *
681  * It is a sad fact of life that this function is called from several places
682  * deeply under spinlocking.  It may not sleep.
683  *
684  * If the page has buffers, the uptodate buffers are set dirty, to preserve
685  * dirty-state coherency between the page and the buffers.  If the page does
686  * not have buffers then when they are later attached they will all be set
687  * dirty.
688  *
689  * The buffers are dirtied before the page is dirtied.  There's a small race
690  * window in which a writepage caller may see the page cleanness but not the
691  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
692  * before the buffers, a concurrent writepage caller could clear the page dirty
693  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
694  * page on the dirty page list.
695  *
696  * We use private_lock to lock against try_to_free_buffers while using the
697  * page's buffer list.  Also use this to protect against clean buffers being
698  * added to the page after it was set dirty.
699  *
700  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
701  * address_space though.
702  */
703 int __set_page_dirty_buffers(struct page *page)
704 {
705 	struct address_space * const mapping = page_mapping(page);
706 
707 	if (unlikely(!mapping))
708 		return !TestSetPageDirty(page);
709 
710 	spin_lock(&mapping->private_lock);
711 	if (page_has_buffers(page)) {
712 		struct buffer_head *head = page_buffers(page);
713 		struct buffer_head *bh = head;
714 
715 		do {
716 			set_buffer_dirty(bh);
717 			bh = bh->b_this_page;
718 		} while (bh != head);
719 	}
720 	spin_unlock(&mapping->private_lock);
721 
722 	if (TestSetPageDirty(page))
723 		return 0;
724 
725 	write_lock_irq(&mapping->tree_lock);
726 	if (page->mapping) {	/* Race with truncate? */
727 		if (mapping_cap_account_dirty(mapping)) {
728 			__inc_zone_page_state(page, NR_FILE_DIRTY);
729 			task_io_account_write(PAGE_CACHE_SIZE);
730 		}
731 		radix_tree_tag_set(&mapping->page_tree,
732 				page_index(page), PAGECACHE_TAG_DIRTY);
733 	}
734 	write_unlock_irq(&mapping->tree_lock);
735 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
736 	return 1;
737 }
738 EXPORT_SYMBOL(__set_page_dirty_buffers);
739 
740 /*
741  * Write out and wait upon a list of buffers.
742  *
743  * We have conflicting pressures: we want to make sure that all
744  * initially dirty buffers get waited on, but that any subsequently
745  * dirtied buffers don't.  After all, we don't want fsync to last
746  * forever if somebody is actively writing to the file.
747  *
748  * Do this in two main stages: first we copy dirty buffers to a
749  * temporary inode list, queueing the writes as we go.  Then we clean
750  * up, waiting for those writes to complete.
751  *
752  * During this second stage, any subsequent updates to the file may end
753  * up refiling the buffer on the original inode's dirty list again, so
754  * there is a chance we will end up with a buffer queued for write but
755  * not yet completed on that list.  So, as a final cleanup we go through
756  * the osync code to catch these locked, dirty buffers without requeuing
757  * any newly dirty buffers for write.
758  */
759 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
760 {
761 	struct buffer_head *bh;
762 	struct list_head tmp;
763 	int err = 0, err2;
764 
765 	INIT_LIST_HEAD(&tmp);
766 
767 	spin_lock(lock);
768 	while (!list_empty(list)) {
769 		bh = BH_ENTRY(list->next);
770 		__remove_assoc_queue(bh);
771 		if (buffer_dirty(bh) || buffer_locked(bh)) {
772 			list_add(&bh->b_assoc_buffers, &tmp);
773 			if (buffer_dirty(bh)) {
774 				get_bh(bh);
775 				spin_unlock(lock);
776 				/*
777 				 * Ensure any pending I/O completes so that
778 				 * ll_rw_block() actually writes the current
779 				 * contents - it is a noop if I/O is still in
780 				 * flight on potentially older contents.
781 				 */
782 				ll_rw_block(SWRITE, 1, &bh);
783 				brelse(bh);
784 				spin_lock(lock);
785 			}
786 		}
787 	}
788 
789 	while (!list_empty(&tmp)) {
790 		bh = BH_ENTRY(tmp.prev);
791 		list_del_init(&bh->b_assoc_buffers);
792 		get_bh(bh);
793 		spin_unlock(lock);
794 		wait_on_buffer(bh);
795 		if (!buffer_uptodate(bh))
796 			err = -EIO;
797 		brelse(bh);
798 		spin_lock(lock);
799 	}
800 
801 	spin_unlock(lock);
802 	err2 = osync_buffers_list(lock, list);
803 	if (err)
804 		return err;
805 	else
806 		return err2;
807 }
808 
809 /*
810  * Invalidate any and all dirty buffers on a given inode.  We are
811  * probably unmounting the fs, but that doesn't mean we have already
812  * done a sync().  Just drop the buffers from the inode list.
813  *
814  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
815  * assumes that all the buffers are against the blockdev.  Not true
816  * for reiserfs.
817  */
818 void invalidate_inode_buffers(struct inode *inode)
819 {
820 	if (inode_has_buffers(inode)) {
821 		struct address_space *mapping = &inode->i_data;
822 		struct list_head *list = &mapping->private_list;
823 		struct address_space *buffer_mapping = mapping->assoc_mapping;
824 
825 		spin_lock(&buffer_mapping->private_lock);
826 		while (!list_empty(list))
827 			__remove_assoc_queue(BH_ENTRY(list->next));
828 		spin_unlock(&buffer_mapping->private_lock);
829 	}
830 }
831 
832 /*
833  * Remove any clean buffers from the inode's buffer list.  This is called
834  * when we're trying to free the inode itself.  Those buffers can pin it.
835  *
836  * Returns true if all buffers were removed.
837  */
838 int remove_inode_buffers(struct inode *inode)
839 {
840 	int ret = 1;
841 
842 	if (inode_has_buffers(inode)) {
843 		struct address_space *mapping = &inode->i_data;
844 		struct list_head *list = &mapping->private_list;
845 		struct address_space *buffer_mapping = mapping->assoc_mapping;
846 
847 		spin_lock(&buffer_mapping->private_lock);
848 		while (!list_empty(list)) {
849 			struct buffer_head *bh = BH_ENTRY(list->next);
850 			if (buffer_dirty(bh)) {
851 				ret = 0;
852 				break;
853 			}
854 			__remove_assoc_queue(bh);
855 		}
856 		spin_unlock(&buffer_mapping->private_lock);
857 	}
858 	return ret;
859 }
860 
861 /*
862  * Create the appropriate buffers when given a page for data area and
863  * the size of each buffer.. Use the bh->b_this_page linked list to
864  * follow the buffers created.  Return NULL if unable to create more
865  * buffers.
866  *
867  * The retry flag is used to differentiate async IO (paging, swapping),
868  * which is not allowed to fail, from ordinary buffer allocations.
869  */
870 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
871 		int retry)
872 {
873 	struct buffer_head *bh, *head;
874 	long offset;
875 
876 try_again:
877 	head = NULL;
878 	offset = PAGE_SIZE;
879 	while ((offset -= size) >= 0) {
880 		bh = alloc_buffer_head(GFP_NOFS);
881 		if (!bh)
882 			goto no_grow;
883 
884 		bh->b_bdev = NULL;
885 		bh->b_this_page = head;
886 		bh->b_blocknr = -1;
887 		head = bh;
888 
889 		bh->b_state = 0;
890 		atomic_set(&bh->b_count, 0);
891 		bh->b_private = NULL;
892 		bh->b_size = size;
893 
894 		/* Link the buffer to its page */
895 		set_bh_page(bh, page, offset);
896 
897 		init_buffer(bh, NULL, NULL);
898 	}
899 	return head;
900 /*
901  * In case anything failed, we just free everything we got.
902  */
903 no_grow:
904 	if (head) {
905 		do {
906 			bh = head;
907 			head = head->b_this_page;
908 			free_buffer_head(bh);
909 		} while (head);
910 	}
911 
912 	/*
913 	 * Return failure for non-async IO requests.  Async IO requests
914 	 * are not allowed to fail, so we have to wait until buffer heads
915 	 * become available.  But we don't want tasks sleeping with
916 	 * partially complete buffers, so all were released above.
917 	 */
918 	if (!retry)
919 		return NULL;
920 
921 	/* We're _really_ low on memory. Now we just
922 	 * wait for old buffer heads to become free due to
923 	 * finishing IO.  Since this is an async request and
924 	 * the reserve list is empty, we're sure there are
925 	 * async buffer heads in use.
926 	 */
927 	free_more_memory();
928 	goto try_again;
929 }
930 EXPORT_SYMBOL_GPL(alloc_page_buffers);
931 
932 static inline void
933 link_dev_buffers(struct page *page, struct buffer_head *head)
934 {
935 	struct buffer_head *bh, *tail;
936 
937 	bh = head;
938 	do {
939 		tail = bh;
940 		bh = bh->b_this_page;
941 	} while (bh);
942 	tail->b_this_page = head;
943 	attach_page_buffers(page, head);
944 }
945 
946 /*
947  * Initialise the state of a blockdev page's buffers.
948  */
949 static void
950 init_page_buffers(struct page *page, struct block_device *bdev,
951 			sector_t block, int size)
952 {
953 	struct buffer_head *head = page_buffers(page);
954 	struct buffer_head *bh = head;
955 	int uptodate = PageUptodate(page);
956 
957 	do {
958 		if (!buffer_mapped(bh)) {
959 			init_buffer(bh, NULL, NULL);
960 			bh->b_bdev = bdev;
961 			bh->b_blocknr = block;
962 			if (uptodate)
963 				set_buffer_uptodate(bh);
964 			set_buffer_mapped(bh);
965 		}
966 		block++;
967 		bh = bh->b_this_page;
968 	} while (bh != head);
969 }
970 
971 /*
972  * Create the page-cache page that contains the requested block.
973  *
974  * This is used purely for blockdev mappings.
975  */
976 static struct page *
977 grow_dev_page(struct block_device *bdev, sector_t block,
978 		pgoff_t index, int size)
979 {
980 	struct inode *inode = bdev->bd_inode;
981 	struct page *page;
982 	struct buffer_head *bh;
983 
984 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
985 	if (!page)
986 		return NULL;
987 
988 	BUG_ON(!PageLocked(page));
989 
990 	if (page_has_buffers(page)) {
991 		bh = page_buffers(page);
992 		if (bh->b_size == size) {
993 			init_page_buffers(page, bdev, block, size);
994 			return page;
995 		}
996 		if (!try_to_free_buffers(page))
997 			goto failed;
998 	}
999 
1000 	/*
1001 	 * Allocate some buffers for this page
1002 	 */
1003 	bh = alloc_page_buffers(page, size, 0);
1004 	if (!bh)
1005 		goto failed;
1006 
1007 	/*
1008 	 * Link the page to the buffers and initialise them.  Take the
1009 	 * lock to be atomic wrt __find_get_block(), which does not
1010 	 * run under the page lock.
1011 	 */
1012 	spin_lock(&inode->i_mapping->private_lock);
1013 	link_dev_buffers(page, bh);
1014 	init_page_buffers(page, bdev, block, size);
1015 	spin_unlock(&inode->i_mapping->private_lock);
1016 	return page;
1017 
1018 failed:
1019 	BUG();
1020 	unlock_page(page);
1021 	page_cache_release(page);
1022 	return NULL;
1023 }
1024 
1025 /*
1026  * Create buffers for the specified block device block's page.  If
1027  * that page was dirty, the buffers are set dirty also.
1028  *
1029  * Except that's a bug.  Attaching dirty buffers to a dirty
1030  * blockdev's page can result in filesystem corruption, because
1031  * some of those buffers may be aliases of filesystem data.
1032  * grow_dev_page() will go BUG() if this happens.
1033  */
1034 static int
1035 grow_buffers(struct block_device *bdev, sector_t block, int size)
1036 {
1037 	struct page *page;
1038 	pgoff_t index;
1039 	int sizebits;
1040 
1041 	sizebits = -1;
1042 	do {
1043 		sizebits++;
1044 	} while ((size << sizebits) < PAGE_SIZE);
1045 
1046 	index = block >> sizebits;
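	/*
	 * Worked example: with 4k pages and 1k blocks, sizebits is 2, so
	 * block 11 falls in page index 2 (11 >> 2); "block = index <<
	 * sizebits" below then rounds it down to block 8, the first block
	 * of that page.
	 */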
1047 
1048 	/*
1049 	 * Check for a block which wants to lie outside our maximum possible
1050 	 * pagecache index.  (this comparison is done using sector_t types).
1051 	 */
1052 	if (unlikely(index != block >> sizebits)) {
1053 		char b[BDEVNAME_SIZE];
1054 
1055 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1056 			"device %s\n",
1057 			__FUNCTION__, (unsigned long long)block,
1058 			bdevname(bdev, b));
1059 		return -EIO;
1060 	}
1061 	block = index << sizebits;
1062 	/* Create a page with the proper size buffers.. */
1063 	page = grow_dev_page(bdev, block, index, size);
1064 	if (!page)
1065 		return 0;
1066 	unlock_page(page);
1067 	page_cache_release(page);
1068 	return 1;
1069 }
1070 
1071 static struct buffer_head *
1072 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1073 {
1074 	/* Size must be multiple of hard sectorsize */
1075 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1076 			(size < 512 || size > PAGE_SIZE))) {
1077 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1078 					size);
1079 		printk(KERN_ERR "hardsect size: %d\n",
1080 					bdev_hardsect_size(bdev));
1081 
1082 		dump_stack();
1083 		return NULL;
1084 	}
1085 
1086 	for (;;) {
1087 		struct buffer_head * bh;
1088 		int ret;
1089 
1090 		bh = __find_get_block(bdev, block, size);
1091 		if (bh)
1092 			return bh;
1093 
1094 		ret = grow_buffers(bdev, block, size);
1095 		if (ret < 0)
1096 			return NULL;
1097 		if (ret == 0)
1098 			free_more_memory();
1099 	}
1100 }
1101 
1102 /*
1103  * The relationship between dirty buffers and dirty pages:
1104  *
1105  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1106  * the page is tagged dirty in its radix tree.
1107  *
1108  * At all times, the dirtiness of the buffers represents the dirtiness of
1109  * subsections of the page.  If the page has buffers, the page dirty bit is
1110  * merely a hint about the true dirty state.
1111  *
1112  * When a page is set dirty in its entirety, all its buffers are marked dirty
1113  * (if the page has buffers).
1114  *
1115  * When a buffer is marked dirty, its page is dirtied, but the page's other
1116  * buffers are not.
1117  *
1118  * Also.  When blockdev buffers are explicitly read with bread(), they
1119  * individually become uptodate.  But their backing page remains not
1120  * uptodate - even if all of its buffers are uptodate.  A subsequent
1121  * block_read_full_page() against that page will discover all the uptodate
1122  * buffers, will set the page uptodate and will perform no I/O.
1123  */
1124 
1125 /**
1126  * mark_buffer_dirty - mark a buffer_head as needing writeout
1127  * @bh: the buffer_head to mark dirty
1128  *
1129  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1130  * backing page dirty, then tag the page as dirty in its address_space's radix
1131  * tree and then attach the address_space's inode to its superblock's dirty
1132  * inode list.
1133  *
1134  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1135  * mapping->tree_lock and the global inode_lock.
1136  */
1137 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1138 {
1139 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1140 		__set_page_dirty_nobuffers(bh->b_page);
1141 }
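
/*
 * Typical pattern (sketch) for modifying a metadata block through the
 * buffer cache.  my_fs_update_block() is hypothetical; sb_bread() is the
 * usual wrapper around __bread() below, and mark_buffer_dirty() above
 * then dirties the page, tags it in the radix tree and queues the inode:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	my_fs_update_block(bh->b_data);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */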
1142 
1143 /*
1144  * Decrement a buffer_head's reference count.  If all buffers against a page
1145  * have zero reference count, are clean and unlocked, and if the page is clean
1146  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1147  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1148  * a page but it ends up not being freed, and buffers may later be reattached).
1149  */
1150 void __brelse(struct buffer_head * buf)
1151 {
1152 	if (atomic_read(&buf->b_count)) {
1153 		put_bh(buf);
1154 		return;
1155 	}
1156 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1157 	WARN_ON(1);
1158 }
1159 
1160 /*
1161  * bforget() is like brelse(), except it discards any
1162  * potentially dirty data.
1163  */
1164 void __bforget(struct buffer_head *bh)
1165 {
1166 	clear_buffer_dirty(bh);
1167 	if (!list_empty(&bh->b_assoc_buffers)) {
1168 		struct address_space *buffer_mapping = bh->b_page->mapping;
1169 
1170 		spin_lock(&buffer_mapping->private_lock);
1171 		list_del_init(&bh->b_assoc_buffers);
1172 		bh->b_assoc_map = NULL;
1173 		spin_unlock(&buffer_mapping->private_lock);
1174 	}
1175 	__brelse(bh);
1176 }
1177 
1178 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1179 {
1180 	lock_buffer(bh);
1181 	if (buffer_uptodate(bh)) {
1182 		unlock_buffer(bh);
1183 		return bh;
1184 	} else {
1185 		get_bh(bh);
1186 		bh->b_end_io = end_buffer_read_sync;
1187 		submit_bh(READ, bh);
1188 		wait_on_buffer(bh);
1189 		if (buffer_uptodate(bh))
1190 			return bh;
1191 	}
1192 	brelse(bh);
1193 	return NULL;
1194 }
1195 
1196 /*
1197  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
1198  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1199  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1200  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1201  * CPU's LRUs at the same time.
1202  *
1203  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1204  * sb_find_get_block().
1205  *
1206  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1207  * a local interrupt disable for that.
1208  */
1209 
1210 #define BH_LRU_SIZE	8
1211 
1212 struct bh_lru {
1213 	struct buffer_head *bhs[BH_LRU_SIZE];
1214 };
1215 
1216 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1217 
1218 #ifdef CONFIG_SMP
1219 #define bh_lru_lock()	local_irq_disable()
1220 #define bh_lru_unlock()	local_irq_enable()
1221 #else
1222 #define bh_lru_lock()	preempt_disable()
1223 #define bh_lru_unlock()	preempt_enable()
1224 #endif
1225 
1226 static inline void check_irqs_on(void)
1227 {
1228 #ifdef irqs_disabled
1229 	BUG_ON(irqs_disabled());
1230 #endif
1231 }
1232 
1233 /*
1234  * The LRU management algorithm is dopey-but-simple.  Sorry.
1235  */
1236 static void bh_lru_install(struct buffer_head *bh)
1237 {
1238 	struct buffer_head *evictee = NULL;
1239 	struct bh_lru *lru;
1240 
1241 	check_irqs_on();
1242 	bh_lru_lock();
1243 	lru = &__get_cpu_var(bh_lrus);
1244 	if (lru->bhs[0] != bh) {
1245 		struct buffer_head *bhs[BH_LRU_SIZE];
1246 		int in;
1247 		int out = 0;
1248 
1249 		get_bh(bh);
1250 		bhs[out++] = bh;
1251 		for (in = 0; in < BH_LRU_SIZE; in++) {
1252 			struct buffer_head *bh2 = lru->bhs[in];
1253 
1254 			if (bh2 == bh) {
1255 				__brelse(bh2);
1256 			} else {
1257 				if (out >= BH_LRU_SIZE) {
1258 					BUG_ON(evictee != NULL);
1259 					evictee = bh2;
1260 				} else {
1261 					bhs[out++] = bh2;
1262 				}
1263 			}
1264 		}
1265 		while (out < BH_LRU_SIZE)
1266 			bhs[out++] = NULL;
1267 		memcpy(lru->bhs, bhs, sizeof(bhs));
1268 	}
1269 	bh_lru_unlock();
1270 
1271 	if (evictee)
1272 		__brelse(evictee);
1273 }
1274 
1275 /*
1276  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1277  */
1278 static struct buffer_head *
1279 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1280 {
1281 	struct buffer_head *ret = NULL;
1282 	struct bh_lru *lru;
1283 	unsigned int i;
1284 
1285 	check_irqs_on();
1286 	bh_lru_lock();
1287 	lru = &__get_cpu_var(bh_lrus);
1288 	for (i = 0; i < BH_LRU_SIZE; i++) {
1289 		struct buffer_head *bh = lru->bhs[i];
1290 
1291 		if (bh && bh->b_bdev == bdev &&
1292 				bh->b_blocknr == block && bh->b_size == size) {
1293 			if (i) {
1294 				while (i) {
1295 					lru->bhs[i] = lru->bhs[i - 1];
1296 					i--;
1297 				}
1298 				lru->bhs[0] = bh;
1299 			}
1300 			get_bh(bh);
1301 			ret = bh;
1302 			break;
1303 		}
1304 	}
1305 	bh_lru_unlock();
1306 	return ret;
1307 }
1308 
1309 /*
1310  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1311  * it in the LRU and mark it as accessed.  If it is not present then return
1312  * NULL
1313  */
1314 struct buffer_head *
1315 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1316 {
1317 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1318 
1319 	if (bh == NULL) {
1320 		bh = __find_get_block_slow(bdev, block);
1321 		if (bh)
1322 			bh_lru_install(bh);
1323 	}
1324 	if (bh)
1325 		touch_buffer(bh);
1326 	return bh;
1327 }
1328 EXPORT_SYMBOL(__find_get_block);
1329 
1330 /*
1331  * __getblk will locate (and, if necessary, create) the buffer_head
1332  * which corresponds to the passed block_device, block and size. The
1333  * returned buffer has its reference count incremented.
1334  *
1335  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1336  * illegal block number, __getblk() will happily return a buffer_head
1337  * which represents the non-existent block.  Very weird.
1338  *
1339  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1340  * attempt is failing.  FIXME, perhaps?
1341  */
1342 struct buffer_head *
1343 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1344 {
1345 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1346 
1347 	might_sleep();
1348 	if (bh == NULL)
1349 		bh = __getblk_slow(bdev, block, size);
1350 	return bh;
1351 }
1352 EXPORT_SYMBOL(__getblk);
1353 
1354 /*
1355  * Do async read-ahead on a buffer..
1356  */
1357 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1358 {
1359 	struct buffer_head *bh = __getblk(bdev, block, size);
1360 	if (likely(bh)) {
1361 		ll_rw_block(READA, 1, &bh);
1362 		brelse(bh);
1363 	}
1364 }
1365 EXPORT_SYMBOL(__breadahead);
1366 
1367 /**
1368  *  __bread() - reads a specified block and returns the bh
1369  *  @bdev: the block_device to read from
1370  *  @block: number of block
1371  *  @size: size (in bytes) to read
1372  *
1373  *  Reads a specified block, and returns buffer head that contains it.
1374  *  It returns NULL if the block was unreadable.
1375  */
1376 struct buffer_head *
1377 __bread(struct block_device *bdev, sector_t block, unsigned size)
1378 {
1379 	struct buffer_head *bh = __getblk(bdev, block, size);
1380 
1381 	if (likely(bh) && !buffer_uptodate(bh))
1382 		bh = __bread_slow(bh);
1383 	return bh;
1384 }
1385 EXPORT_SYMBOL(__bread);
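
/*
 * Read-side sketch: __breadahead() starts the I/O without waiting, and
 * __bread() returns an uptodate buffer or NULL on error (sb_bread() in
 * buffer_head.h is the per-superblock wrapper most filesystems use):
 *
 *	struct buffer_head *bh;
 *
 *	__breadahead(bdev, block + 1, size);
 *	bh = __bread(bdev, block, size);
 *	if (!bh)
 *		return -EIO;
 *	...
 *	brelse(bh);
 */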
1386 
1387 /*
1388  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1389  * This doesn't race because it runs in each cpu either in irq
1390  * or with preempt disabled.
1391  */
1392 static void invalidate_bh_lru(void *arg)
1393 {
1394 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1395 	int i;
1396 
1397 	for (i = 0; i < BH_LRU_SIZE; i++) {
1398 		brelse(b->bhs[i]);
1399 		b->bhs[i] = NULL;
1400 	}
1401 	put_cpu_var(bh_lrus);
1402 }
1403 
1404 void invalidate_bh_lrus(void)
1405 {
1406 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1407 }
1408 
1409 void set_bh_page(struct buffer_head *bh,
1410 		struct page *page, unsigned long offset)
1411 {
1412 	bh->b_page = page;
1413 	BUG_ON(offset >= PAGE_SIZE);
1414 	if (PageHighMem(page))
1415 		/*
1416 		 * This catches illegal uses and preserves the offset:
1417 		 */
1418 		bh->b_data = (char *)(0 + offset);
1419 	else
1420 		bh->b_data = page_address(page) + offset;
1421 }
1422 EXPORT_SYMBOL(set_bh_page);
1423 
1424 /*
1425  * Called when truncating a buffer on a page completely.
1426  */
1427 static void discard_buffer(struct buffer_head * bh)
1428 {
1429 	lock_buffer(bh);
1430 	clear_buffer_dirty(bh);
1431 	bh->b_bdev = NULL;
1432 	clear_buffer_mapped(bh);
1433 	clear_buffer_req(bh);
1434 	clear_buffer_new(bh);
1435 	clear_buffer_delay(bh);
1436 	clear_buffer_unwritten(bh);
1437 	unlock_buffer(bh);
1438 }
1439 
1440 /**
1441  * block_invalidatepage - invalidate part or all of a buffer-backed page
1442  *
1443  * @page: the page which is affected
1444  * @offset: the index of the truncation point
1445  *
1446  * block_invalidatepage() is called when all or part of the page has become
1447  * invalidated by a truncate operation.
1448  *
1449  * block_invalidatepage() does not have to release all buffers, but it must
1450  * ensure that no dirty buffer is left outside @offset and that no I/O
1451  * is underway against any of the blocks which are outside the truncation
1452  * point.  Because the caller is about to free (and possibly reuse) those
1453  * blocks on-disk.
1454  */
1455 void block_invalidatepage(struct page *page, unsigned long offset)
1456 {
1457 	struct buffer_head *head, *bh, *next;
1458 	unsigned int curr_off = 0;
1459 
1460 	BUG_ON(!PageLocked(page));
1461 	if (!page_has_buffers(page))
1462 		goto out;
1463 
1464 	head = page_buffers(page);
1465 	bh = head;
1466 	do {
1467 		unsigned int next_off = curr_off + bh->b_size;
1468 		next = bh->b_this_page;
1469 
1470 		/*
1471 		 * is this block fully invalidated?
1472 		 */
1473 		if (offset <= curr_off)
1474 			discard_buffer(bh);
1475 		curr_off = next_off;
1476 		bh = next;
1477 	} while (bh != head);
1478 
1479 	/*
1480 	 * We release buffers only if the entire page is being invalidated.
1481 	 * The get_block cached value has been unconditionally invalidated,
1482 	 * so real IO is not possible anymore.
1483 	 */
1484 	if (offset == 0)
1485 		try_to_release_page(page, 0);
1486 out:
1487 	return;
1488 }
1489 EXPORT_SYMBOL(block_invalidatepage);
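
/*
 * block_invalidatepage() is also what do_invalidatepage() falls back to
 * when an address_space supplies no ->invalidatepage of its own.  A
 * sketch of wiring it up explicitly (my_fs_* callbacks are hypothetical):
 *
 *	static const struct address_space_operations my_fs_aops = {
 *		.readpage	= my_fs_readpage,
 *		.writepage	= my_fs_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */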
1490 
1491 /*
1492  * We attach and possibly dirty the buffers atomically wrt
1493  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1494  * is already excluded via the page lock.
1495  */
1496 void create_empty_buffers(struct page *page,
1497 			unsigned long blocksize, unsigned long b_state)
1498 {
1499 	struct buffer_head *bh, *head, *tail;
1500 
1501 	head = alloc_page_buffers(page, blocksize, 1);
1502 	bh = head;
1503 	do {
1504 		bh->b_state |= b_state;
1505 		tail = bh;
1506 		bh = bh->b_this_page;
1507 	} while (bh);
1508 	tail->b_this_page = head;
1509 
1510 	spin_lock(&page->mapping->private_lock);
1511 	if (PageUptodate(page) || PageDirty(page)) {
1512 		bh = head;
1513 		do {
1514 			if (PageDirty(page))
1515 				set_buffer_dirty(bh);
1516 			if (PageUptodate(page))
1517 				set_buffer_uptodate(bh);
1518 			bh = bh->b_this_page;
1519 		} while (bh != head);
1520 	}
1521 	attach_page_buffers(page, head);
1522 	spin_unlock(&page->mapping->private_lock);
1523 }
1524 EXPORT_SYMBOL(create_empty_buffers);
1525 
1526 /*
1527  * We are taking a block for data and we don't want any output from any
1528  * buffer-cache aliases from the moment this function returns until
1529  * something explicitly marks the buffer dirty (hopefully that will
1530  * not happen until we free that block ;-)
1531  * We don't even need to mark it not-uptodate - nobody can expect
1532  * anything from a newly allocated buffer anyway. We used to use
1533  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1534  * don't want to mark the alias unmapped, for example - it would confuse
1535  * anyone who might pick it with bread() afterwards...
1536  *
1537  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1538  * be writeout I/O going on against recently-freed buffers.  We don't
1539  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1540  * only if we really need to.  That happens here.
1541  */
1542 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1543 {
1544 	struct buffer_head *old_bh;
1545 
1546 	might_sleep();
1547 
1548 	old_bh = __find_get_block_slow(bdev, block);
1549 	if (old_bh) {
1550 		clear_buffer_dirty(old_bh);
1551 		wait_on_buffer(old_bh);
1552 		clear_buffer_req(old_bh);
1553 		__brelse(old_bh);
1554 	}
1555 }
1556 EXPORT_SYMBOL(unmap_underlying_metadata);
1557 
1558 /*
1559  * NOTE! All mapped/uptodate combinations are valid:
1560  *
1561  *	Mapped	Uptodate	Meaning
1562  *
1563  *	No	No		"unknown" - must do get_block()
1564  *	No	Yes		"hole" - zero-filled
1565  *	Yes	No		"allocated" - allocated on disk, not read in
1566  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1567  *
1568  * "Dirty" is valid only with the last case (mapped+uptodate).
1569  */
1570 
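/*
 * The "mapped" state above is what a filesystem's get_block callback
 * establishes.  A toy get_block (sketch) for a file laid out contiguously
 * from my_first_block() - my_first_block()/my_nr_blocks() are
 * hypothetical; real implementations look up on-disk metadata here:
 *
 *	static int my_fs_get_block(struct inode *inode, sector_t iblock,
 *				   struct buffer_head *bh_result, int create)
 *	{
 *		if (iblock >= my_nr_blocks(inode))
 *			return create ? -ENOSPC : 0;
 *		map_bh(bh_result, inode->i_sb, my_first_block(inode) + iblock);
 *		return 0;
 *	}
 */
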
1571 /*
1572  * While block_write_full_page is writing back the dirty buffers under
1573  * the page lock, whoever dirtied the buffers may decide to clean them
1574  * again at any time.  We handle that by only looking at the buffer
1575  * state inside lock_buffer().
1576  *
1577  * If block_write_full_page() is called for regular writeback
1578  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1579  * locked buffer.   This only can happen if someone has written the buffer
1580  * directly, with submit_bh().  At the address_space level PageWriteback
1581  * prevents this contention from occurring.
1582  */
1583 static int __block_write_full_page(struct inode *inode, struct page *page,
1584 			get_block_t *get_block, struct writeback_control *wbc)
1585 {
1586 	int err;
1587 	sector_t block;
1588 	sector_t last_block;
1589 	struct buffer_head *bh, *head;
1590 	const unsigned blocksize = 1 << inode->i_blkbits;
1591 	int nr_underway = 0;
1592 
1593 	BUG_ON(!PageLocked(page));
1594 
1595 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1596 
1597 	if (!page_has_buffers(page)) {
1598 		create_empty_buffers(page, blocksize,
1599 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1600 	}
1601 
1602 	/*
1603 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1604 	 * here, and the (potentially unmapped) buffers may become dirty at
1605 	 * any time.  If a buffer becomes dirty here after we've inspected it
1606 	 * then we just miss that fact, and the page stays dirty.
1607 	 *
1608 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1609 	 * handle that here by just cleaning them.
1610 	 */
1611 
1612 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1613 	head = page_buffers(page);
1614 	bh = head;
1615 
1616 	/*
1617 	 * Get all the dirty buffers mapped to disk addresses and
1618 	 * handle any aliases from the underlying blockdev's mapping.
1619 	 */
1620 	do {
1621 		if (block > last_block) {
1622 			/*
1623 			 * mapped buffers outside i_size will occur, because
1624 			 * this page can be outside i_size when there is a
1625 			 * truncate in progress.
1626 			 */
1627 			/*
1628 			 * The buffer was zeroed by block_write_full_page()
1629 			 */
1630 			clear_buffer_dirty(bh);
1631 			set_buffer_uptodate(bh);
1632 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1633 			WARN_ON(bh->b_size != blocksize);
1634 			err = get_block(inode, block, bh, 1);
1635 			if (err)
1636 				goto recover;
1637 			if (buffer_new(bh)) {
1638 				/* blockdev mappings never come here */
1639 				clear_buffer_new(bh);
1640 				unmap_underlying_metadata(bh->b_bdev,
1641 							bh->b_blocknr);
1642 			}
1643 		}
1644 		bh = bh->b_this_page;
1645 		block++;
1646 	} while (bh != head);
1647 
1648 	do {
1649 		if (!buffer_mapped(bh))
1650 			continue;
1651 		/*
1652 		 * If it's a fully non-blocking write attempt and we cannot
1653 		 * lock the buffer then redirty the page.  Note that this can
1654 		 * potentially cause a busy-wait loop from pdflush and kswapd
1655 		 * activity, but those code paths have their own higher-level
1656 		 * throttling.
1657 		 */
1658 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1659 			lock_buffer(bh);
1660 		} else if (test_set_buffer_locked(bh)) {
1661 			redirty_page_for_writepage(wbc, page);
1662 			continue;
1663 		}
1664 		if (test_clear_buffer_dirty(bh)) {
1665 			mark_buffer_async_write(bh);
1666 		} else {
1667 			unlock_buffer(bh);
1668 		}
1669 	} while ((bh = bh->b_this_page) != head);
1670 
1671 	/*
1672 	 * The page and its buffers are protected by PageWriteback(), so we can
1673 	 * drop the bh refcounts early.
1674 	 */
1675 	BUG_ON(PageWriteback(page));
1676 	set_page_writeback(page);
1677 
1678 	do {
1679 		struct buffer_head *next = bh->b_this_page;
1680 		if (buffer_async_write(bh)) {
1681 			submit_bh(WRITE, bh);
1682 			nr_underway++;
1683 		}
1684 		bh = next;
1685 	} while (bh != head);
1686 	unlock_page(page);
1687 
1688 	err = 0;
1689 done:
1690 	if (nr_underway == 0) {
1691 		/*
1692 		 * The page was marked dirty, but the buffers were
1693 		 * clean.  Someone wrote them back by hand with
1694 		 * ll_rw_block/submit_bh.  A rare case.
1695 		 */
1696 		end_page_writeback(page);
1697 
1698 		/*
1699 		 * The page and buffer_heads can be released at any time from
1700 		 * here on.
1701 		 */
1702 		wbc->pages_skipped++;	/* We didn't write this page */
1703 	}
1704 	return err;
1705 
1706 recover:
1707 	/*
1708 	 * ENOSPC, or some other error.  We may already have added some
1709 	 * blocks to the file, so we need to write these out to avoid
1710 	 * exposing stale data.
1711 	 * The page is currently locked and not marked for writeback
1712 	 */
1713 	bh = head;
1714 	/* Recovery: lock and submit the mapped buffers */
1715 	do {
1716 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
1717 			lock_buffer(bh);
1718 			mark_buffer_async_write(bh);
1719 		} else {
1720 			/*
1721 			 * The buffer may have been set dirty during
1722 			 * attachment to a dirty page.
1723 			 */
1724 			clear_buffer_dirty(bh);
1725 		}
1726 	} while ((bh = bh->b_this_page) != head);
1727 	SetPageError(page);
1728 	BUG_ON(PageWriteback(page));
1729 	mapping_set_error(page->mapping, err);
1730 	set_page_writeback(page);
1731 	do {
1732 		struct buffer_head *next = bh->b_this_page;
1733 		if (buffer_async_write(bh)) {
1734 			clear_buffer_dirty(bh);
1735 			submit_bh(WRITE, bh);
1736 			nr_underway++;
1737 		}
1738 		bh = next;
1739 	} while (bh != head);
1740 	unlock_page(page);
1741 	goto done;
1742 }
1743 
1744 static int __block_prepare_write(struct inode *inode, struct page *page,
1745 		unsigned from, unsigned to, get_block_t *get_block)
1746 {
1747 	unsigned block_start, block_end;
1748 	sector_t block;
1749 	int err = 0;
1750 	unsigned blocksize, bbits;
1751 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1752 
1753 	BUG_ON(!PageLocked(page));
1754 	BUG_ON(from > PAGE_CACHE_SIZE);
1755 	BUG_ON(to > PAGE_CACHE_SIZE);
1756 	BUG_ON(from > to);
1757 
1758 	blocksize = 1 << inode->i_blkbits;
1759 	if (!page_has_buffers(page))
1760 		create_empty_buffers(page, blocksize, 0);
1761 	head = page_buffers(page);
1762 
1763 	bbits = inode->i_blkbits;
1764 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1765 
1766 	for(bh = head, block_start = 0; bh != head || !block_start;
1767 	    block++, block_start=block_end, bh = bh->b_this_page) {
1768 		block_end = block_start + blocksize;
1769 		if (block_end <= from || block_start >= to) {
1770 			if (PageUptodate(page)) {
1771 				if (!buffer_uptodate(bh))
1772 					set_buffer_uptodate(bh);
1773 			}
1774 			continue;
1775 		}
1776 		if (buffer_new(bh))
1777 			clear_buffer_new(bh);
1778 		if (!buffer_mapped(bh)) {
1779 			WARN_ON(bh->b_size != blocksize);
1780 			err = get_block(inode, block, bh, 1);
1781 			if (err)
1782 				break;
1783 			if (buffer_new(bh)) {
1784 				unmap_underlying_metadata(bh->b_bdev,
1785 							bh->b_blocknr);
1786 				if (PageUptodate(page)) {
1787 					set_buffer_uptodate(bh);
1788 					continue;
1789 				}
1790 				if (block_end > to || block_start < from) {
1791 					void *kaddr;
1792 
1793 					kaddr = kmap_atomic(page, KM_USER0);
1794 					if (block_end > to)
1795 						memset(kaddr+to, 0,
1796 							block_end-to);
1797 					if (block_start < from)
1798 						memset(kaddr+block_start,
1799 							0, from-block_start);
1800 					flush_dcache_page(page);
1801 					kunmap_atomic(kaddr, KM_USER0);
1802 				}
1803 				continue;
1804 			}
1805 		}
1806 		if (PageUptodate(page)) {
1807 			if (!buffer_uptodate(bh))
1808 				set_buffer_uptodate(bh);
1809 			continue;
1810 		}
1811 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1812 		    !buffer_unwritten(bh) &&
1813 		     (block_start < from || block_end > to)) {
1814 			ll_rw_block(READ, 1, &bh);
1815 			*wait_bh++=bh;
1816 		}
1817 	}
1818 	/*
1819 	 * If we issued read requests - let them complete.
1820 	 */
1821 	while(wait_bh > wait) {
1822 		wait_on_buffer(*--wait_bh);
1823 		if (!buffer_uptodate(*wait_bh))
1824 			err = -EIO;
1825 	}
1826 	if (!err) {
1827 		bh = head;
1828 		do {
1829 			if (buffer_new(bh))
1830 				clear_buffer_new(bh);
1831 		} while ((bh = bh->b_this_page) != head);
1832 		return 0;
1833 	}
1834 	/* Error case: */
1835 	/*
1836 	 * Zero out any newly allocated blocks to avoid exposing stale
1837 	 * data.  If BH_New is set, we know that the block was newly
1838 	 * allocated in the above loop.
1839 	 */
1840 	bh = head;
1841 	block_start = 0;
1842 	do {
1843 		block_end = block_start+blocksize;
1844 		if (block_end <= from)
1845 			goto next_bh;
1846 		if (block_start >= to)
1847 			break;
1848 		if (buffer_new(bh)) {
1849 			clear_buffer_new(bh);
1850 			zero_user_page(page, block_start, bh->b_size, KM_USER0);
1851 			set_buffer_uptodate(bh);
1852 			mark_buffer_dirty(bh);
1853 		}
1854 next_bh:
1855 		block_start = block_end;
1856 		bh = bh->b_this_page;
1857 	} while (bh != head);
1858 	return err;
1859 }
1860 
1861 static int __block_commit_write(struct inode *inode, struct page *page,
1862 		unsigned from, unsigned to)
1863 {
1864 	unsigned block_start, block_end;
1865 	int partial = 0;
1866 	unsigned blocksize;
1867 	struct buffer_head *bh, *head;
1868 
1869 	blocksize = 1 << inode->i_blkbits;
1870 
1871 	for(bh = head = page_buffers(page), block_start = 0;
1872 	    bh != head || !block_start;
1873 	    block_start=block_end, bh = bh->b_this_page) {
1874 		block_end = block_start + blocksize;
1875 		if (block_end <= from || block_start >= to) {
1876 			if (!buffer_uptodate(bh))
1877 				partial = 1;
1878 		} else {
1879 			set_buffer_uptodate(bh);
1880 			mark_buffer_dirty(bh);
1881 		}
1882 	}
1883 
1884 	/*
1885 	 * If this is a partial write which happened to make all buffers
1886 	 * uptodate then we can optimize away a bogus readpage() for
1887 	 * the next read(). Here we 'discover' whether the page went
1888 	 * uptodate as a result of this (potentially partial) write.
1889 	 */
1890 	if (!partial)
1891 		SetPageUptodate(page);
1892 	return 0;
1893 }
1894 
1895 /*
1896  * Generic "read page" function for block devices that have the normal
1897  * get_block functionality. This is most of the block device filesystems.
1898  * Reads the page asynchronously --- the unlock_buffer() and
1899  * set/clear_buffer_uptodate() functions propagate buffer state into the
1900  * page struct once IO has completed.
1901  */
1902 int block_read_full_page(struct page *page, get_block_t *get_block)
1903 {
1904 	struct inode *inode = page->mapping->host;
1905 	sector_t iblock, lblock;
1906 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1907 	unsigned int blocksize;
1908 	int nr, i;
1909 	int fully_mapped = 1;
1910 
1911 	BUG_ON(!PageLocked(page));
1912 	blocksize = 1 << inode->i_blkbits;
1913 	if (!page_has_buffers(page))
1914 		create_empty_buffers(page, blocksize, 0);
1915 	head = page_buffers(page);
1916 
1917 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1918 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1919 	bh = head;
1920 	nr = 0;
1921 	i = 0;
1922 
1923 	do {
1924 		if (buffer_uptodate(bh))
1925 			continue;
1926 
1927 		if (!buffer_mapped(bh)) {
1928 			int err = 0;
1929 
1930 			fully_mapped = 0;
1931 			if (iblock < lblock) {
1932 				WARN_ON(bh->b_size != blocksize);
1933 				err = get_block(inode, iblock, bh, 0);
1934 				if (err)
1935 					SetPageError(page);
1936 			}
1937 			if (!buffer_mapped(bh)) {
1938 				zero_user_page(page, i * blocksize, blocksize,
1939 						KM_USER0);
1940 				if (!err)
1941 					set_buffer_uptodate(bh);
1942 				continue;
1943 			}
1944 			/*
1945 			 * get_block() might have updated the buffer
1946 			 * synchronously
1947 			 */
1948 			if (buffer_uptodate(bh))
1949 				continue;
1950 		}
1951 		arr[nr++] = bh;
1952 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
1953 
1954 	if (fully_mapped)
1955 		SetPageMappedToDisk(page);
1956 
1957 	if (!nr) {
1958 		/*
1959 		 * All buffers are uptodate - we can set the page uptodate
1960 		 * as well. But not if get_block() returned an error.
1961 		 */
1962 		if (!PageError(page))
1963 			SetPageUptodate(page);
1964 		unlock_page(page);
1965 		return 0;
1966 	}
1967 
1968 	/* Stage two: lock the buffers */
1969 	for (i = 0; i < nr; i++) {
1970 		bh = arr[i];
1971 		lock_buffer(bh);
1972 		mark_buffer_async_read(bh);
1973 	}
1974 
1975 	/*
1976 	 * Stage 3: start the IO.  Check for uptodateness
1977 	 * inside the buffer lock in case another process reading
1978 	 * the underlying blockdev brought it uptodate (the sct fix).
1979 	 */
1980 	for (i = 0; i < nr; i++) {
1981 		bh = arr[i];
1982 		if (buffer_uptodate(bh))
1983 			end_buffer_async_read(bh, 1);
1984 		else
1985 			submit_bh(READ, bh);
1986 	}
1987 	return 0;
1988 }
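/*
 * Illustrative sketch, not part of the original file: a filesystem with a
 * conventional get_block callback would typically wire this function up as
 * its ->readpage method.  The names "examplefs_readpage" and
 * "examplefs_get_block" are hypothetical.
 *
 *	static int examplefs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, examplefs_get_block);
 *	}
 */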
1989 
1990 /* utility function for filesystems that need to do work on expanding
1991  * truncates.  Uses prepare/commit_write to allow the filesystem to
1992  * deal with the hole.
1993  */
1994 static int __generic_cont_expand(struct inode *inode, loff_t size,
1995 				 pgoff_t index, unsigned int offset)
1996 {
1997 	struct address_space *mapping = inode->i_mapping;
1998 	struct page *page;
1999 	unsigned long limit;
2000 	int err;
2001 
2002 	err = -EFBIG;
2003 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2004 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2005 		send_sig(SIGXFSZ, current, 0);
2006 		goto out;
2007 	}
2008 	if (size > inode->i_sb->s_maxbytes)
2009 		goto out;
2010 
2011 	err = -ENOMEM;
2012 	page = grab_cache_page(mapping, index);
2013 	if (!page)
2014 		goto out;
2015 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2016 	if (err) {
2017 		/*
2018 		 * ->prepare_write() may have instantiated a few blocks
2019 		 * outside i_size.  Trim these off again.
2020 		 */
2021 		unlock_page(page);
2022 		page_cache_release(page);
2023 		vmtruncate(inode, inode->i_size);
2024 		goto out;
2025 	}
2026 
2027 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2028 
2029 	unlock_page(page);
2030 	page_cache_release(page);
2031 	if (err > 0)
2032 		err = 0;
2033 out:
2034 	return err;
2035 }
2036 
2037 int generic_cont_expand(struct inode *inode, loff_t size)
2038 {
2039 	pgoff_t index;
2040 	unsigned int offset;
2041 
2042 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2043 
2044 	/* ugh.  In prepare/commit_write, if from==to==start of block, we
2045 	 * skip the prepare.  Make sure we never send an offset for the start
2046 	 * of a block.
2047 	 */
2048 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2049 		/* caller must handle this extra byte. */
2050 		offset++;
2051 	}
2052 	index = size >> PAGE_CACHE_SHIFT;
2053 
2054 	return __generic_cont_expand(inode, size, index, offset);
2055 }
2056 
2057 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2058 {
2059 	loff_t pos = size - 1;
2060 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2061 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2062 
2063 	/* prepare/commit_write can handle even if from==to==start of block. */
2064 	return __generic_cont_expand(inode, size, index, offset);
2065 }
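/*
 * Illustrative sketch, not part of the original file: a filesystem that
 * cannot represent holes might use generic_cont_expand_simple() when it
 * needs to grow a file, e.g. from its setattr/expanding-truncate path.
 * "examplefs_expand" is a hypothetical helper; error handling beyond the
 * return value is omitted.
 *
 *	static int examplefs_expand(struct inode *inode, loff_t new_size)
 *	{
 *		int err = generic_cont_expand_simple(inode, new_size);
 *
 *		if (!err)
 *			mark_inode_dirty(inode);
 *		return err;
 *	}
 */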
2066 
2067 /*
2068  * For moronic filesystems that do not allow holes in files.
2069  * We may have to extend the file.
2070  */
2071 
2072 int cont_prepare_write(struct page *page, unsigned offset,
2073 		unsigned to, get_block_t *get_block, loff_t *bytes)
2074 {
2075 	struct address_space *mapping = page->mapping;
2076 	struct inode *inode = mapping->host;
2077 	struct page *new_page;
2078 	pgoff_t pgpos;
2079 	long status;
2080 	unsigned zerofrom;
2081 	unsigned blocksize = 1 << inode->i_blkbits;
2082 
2083 	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2084 		status = -ENOMEM;
2085 		new_page = grab_cache_page(mapping, pgpos);
2086 		if (!new_page)
2087 			goto out;
2088 		/* we might sleep */
2089 		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2090 			unlock_page(new_page);
2091 			page_cache_release(new_page);
2092 			continue;
2093 		}
2094 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2095 		if (zerofrom & (blocksize-1)) {
2096 			*bytes |= (blocksize-1);
2097 			(*bytes)++;
2098 		}
2099 		status = __block_prepare_write(inode, new_page, zerofrom,
2100 						PAGE_CACHE_SIZE, get_block);
2101 		if (status)
2102 			goto out_unmap;
2103 		zero_user_page(page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
2104 				KM_USER0);
2105 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2106 		unlock_page(new_page);
2107 		page_cache_release(new_page);
2108 	}
2109 
2110 	if (page->index < pgpos) {
2111 		/* completely inside the area */
2112 		zerofrom = offset;
2113 	} else {
2114 		/* page covers the boundary, find the boundary offset */
2115 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2116 
2117 		/* if we will expand the thing, the last block will be filled */
2118 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
2119 			*bytes |= (blocksize-1);
2120 			(*bytes)++;
2121 		}
2122 
2123 		/* starting below the boundary? Nothing to zero out */
2124 		if (offset <= zerofrom)
2125 			zerofrom = offset;
2126 	}
2127 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2128 	if (status)
2129 		goto out1;
2130 	if (zerofrom < offset) {
2131 		zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
2132 		__block_commit_write(inode, page, zerofrom, offset);
2133 	}
2134 	return 0;
2135 out1:
2136 	ClearPageUptodate(page);
2137 	return status;
2138 
2139 out_unmap:
2140 	ClearPageUptodate(new_page);
2141 	unlock_page(new_page);
2142 	page_cache_release(new_page);
2143 out:
2144 	return status;
2145 }
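/*
 * Illustrative sketch, not part of the original file: a hole-less filesystem
 * would call cont_prepare_write() from its ->prepare_write method, passing a
 * pointer to its "bytes allocated so far" field so the gap between the old
 * end of data and the write position gets zero-filled.  "examplefs_get_block",
 * "EXAMPLEFS_I" and the "mmu_private" field are hypothetical, loosely
 * modelled on what FAT does.
 *
 *	static int examplefs_prepare_write(struct file *file, struct page *page,
 *					   unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return cont_prepare_write(page, from, to, examplefs_get_block,
 *					  &EXAMPLEFS_I(inode)->mmu_private);
 *	}
 */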
2146 
2147 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2148 			get_block_t *get_block)
2149 {
2150 	struct inode *inode = page->mapping->host;
2151 	int err = __block_prepare_write(inode, page, from, to, get_block);
2152 	if (err)
2153 		ClearPageUptodate(page);
2154 	return err;
2155 }
2156 
2157 int block_commit_write(struct page *page, unsigned from, unsigned to)
2158 {
2159 	struct inode *inode = page->mapping->host;
2160 	__block_commit_write(inode,page,from,to);
2161 	return 0;
2162 }
2163 
2164 int generic_commit_write(struct file *file, struct page *page,
2165 		unsigned from, unsigned to)
2166 {
2167 	struct inode *inode = page->mapping->host;
2168 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2169 	__block_commit_write(inode,page,from,to);
2170 	/*
2171 	 * No need to use i_size_read() here, the i_size
2172 	 * cannot change under us because we hold i_mutex.
2173 	 */
2174 	if (pos > inode->i_size) {
2175 		i_size_write(inode, pos);
2176 		mark_inode_dirty(inode);
2177 	}
2178 	return 0;
2179 }
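/*
 * Illustrative sketch, not part of the original file: the common pairing for
 * an ordinary block-based filesystem is block_prepare_write() (or a thin
 * wrapper around it) as ->prepare_write and generic_commit_write() as
 * ->commit_write, with i_mutex held by the write() caller.  All names
 * prefixed "examplefs_" are hypothetical.
 *
 *	static int examplefs_prepare_write(struct file *file, struct page *page,
 *					   unsigned from, unsigned to)
 *	{
 *		return block_prepare_write(page, from, to, examplefs_get_block);
 *	}
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.readpage	= examplefs_readpage,
 *		.writepage	= examplefs_writepage,
 *		.prepare_write	= examplefs_prepare_write,
 *		.commit_write	= generic_commit_write,
 *	};
 */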
2180 
2181 
2182 /*
2183  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2184  * immediately, while under the page lock.  So it needs a special end_io
2185  * handler which does not touch the bh after unlocking it.
2186  *
2187  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2188  * a race there is benign: unlock_buffer() only uses the bh's address for
2189  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2190  * itself.
2191  */
2192 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2193 {
2194 	if (uptodate) {
2195 		set_buffer_uptodate(bh);
2196 	} else {
2197 		/* This happens, due to failed READA attempts. */
2198 		clear_buffer_uptodate(bh);
2199 	}
2200 	unlock_buffer(bh);
2201 }
2202 
2203 /*
2204  * On entry, no part of the page is uptodate.
2205  * On exit, the page is fully uptodate in the areas outside (from,to).
2206  */
2207 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2208 			get_block_t *get_block)
2209 {
2210 	struct inode *inode = page->mapping->host;
2211 	const unsigned blkbits = inode->i_blkbits;
2212 	const unsigned blocksize = 1 << blkbits;
2213 	struct buffer_head map_bh;
2214 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2215 	unsigned block_in_page;
2216 	unsigned block_start;
2217 	sector_t block_in_file;
2218 	char *kaddr;
2219 	int nr_reads = 0;
2220 	int i;
2221 	int ret = 0;
2222 	int is_mapped_to_disk = 1;
2223 
2224 	if (PageMappedToDisk(page))
2225 		return 0;
2226 
2227 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2228 	map_bh.b_page = page;
2229 
2230 	/*
2231 	 * We loop across all blocks in the page, whether or not they are
2232 	 * part of the affected region.  This is so we can discover if the
2233 	 * page is fully mapped-to-disk.
2234 	 */
2235 	for (block_start = 0, block_in_page = 0;
2236 		  block_start < PAGE_CACHE_SIZE;
2237 		  block_in_page++, block_start += blocksize) {
2238 		unsigned block_end = block_start + blocksize;
2239 		int create;
2240 
2241 		map_bh.b_state = 0;
2242 		create = 1;
2243 		if (block_start >= to)
2244 			create = 0;
2245 		map_bh.b_size = blocksize;
2246 		ret = get_block(inode, block_in_file + block_in_page,
2247 					&map_bh, create);
2248 		if (ret)
2249 			goto failed;
2250 		if (!buffer_mapped(&map_bh))
2251 			is_mapped_to_disk = 0;
2252 		if (buffer_new(&map_bh))
2253 			unmap_underlying_metadata(map_bh.b_bdev,
2254 							map_bh.b_blocknr);
2255 		if (PageUptodate(page))
2256 			continue;
2257 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2258 			kaddr = kmap_atomic(page, KM_USER0);
2259 			if (block_start < from)
2260 				memset(kaddr+block_start, 0, from-block_start);
2261 			if (block_end > to)
2262 				memset(kaddr + to, 0, block_end - to);
2263 			flush_dcache_page(page);
2264 			kunmap_atomic(kaddr, KM_USER0);
2265 			continue;
2266 		}
2267 		if (buffer_uptodate(&map_bh))
2268 			continue;	/* reiserfs does this */
2269 		if (block_start < from || block_end > to) {
2270 			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2271 
2272 			if (!bh) {
2273 				ret = -ENOMEM;
2274 				goto failed;
2275 			}
2276 			bh->b_state = map_bh.b_state;
2277 			atomic_set(&bh->b_count, 0);
2278 			bh->b_this_page = NULL;
2279 			bh->b_page = page;
2280 			bh->b_blocknr = map_bh.b_blocknr;
2281 			bh->b_size = blocksize;
2282 			bh->b_data = (char *)(long)block_start;
2283 			bh->b_bdev = map_bh.b_bdev;
2284 			bh->b_private = NULL;
2285 			read_bh[nr_reads++] = bh;
2286 		}
2287 	}
2288 
2289 	if (nr_reads) {
2290 		struct buffer_head *bh;
2291 
2292 		/*
2293 		 * The page is locked, so these buffers are protected from
2294 		 * any VM or truncate activity.  Hence we don't need to care
2295 		 * for the buffer_head refcounts.
2296 		 */
2297 		for (i = 0; i < nr_reads; i++) {
2298 			bh = read_bh[i];
2299 			lock_buffer(bh);
2300 			bh->b_end_io = end_buffer_read_nobh;
2301 			submit_bh(READ, bh);
2302 		}
2303 		for (i = 0; i < nr_reads; i++) {
2304 			bh = read_bh[i];
2305 			wait_on_buffer(bh);
2306 			if (!buffer_uptodate(bh))
2307 				ret = -EIO;
2308 			free_buffer_head(bh);
2309 			read_bh[i] = NULL;
2310 		}
2311 		if (ret)
2312 			goto failed;
2313 	}
2314 
2315 	if (is_mapped_to_disk)
2316 		SetPageMappedToDisk(page);
2317 
2318 	return 0;
2319 
2320 failed:
2321 	for (i = 0; i < nr_reads; i++) {
2322 		if (read_bh[i])
2323 			free_buffer_head(read_bh[i]);
2324 	}
2325 
2326 	/*
2327 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
2328 	 * so we'll later zero out any blocks which _were_ allocated.
2329 	 */
2330 	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
2331 	SetPageUptodate(page);
2332 	set_page_dirty(page);
2333 	return ret;
2334 }
2335 EXPORT_SYMBOL(nobh_prepare_write);
2336 
2337 /*
2338  * Make sure any changes to nobh_commit_write() are reflected in
2339  * nobh_truncate_page(), since it doesn't call commit_write().
2340  */
2341 int nobh_commit_write(struct file *file, struct page *page,
2342 		unsigned from, unsigned to)
2343 {
2344 	struct inode *inode = page->mapping->host;
2345 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2346 
2347 	SetPageUptodate(page);
2348 	set_page_dirty(page);
2349 	if (pos > inode->i_size) {
2350 		i_size_write(inode, pos);
2351 		mark_inode_dirty(inode);
2352 	}
2353 	return 0;
2354 }
2355 EXPORT_SYMBOL(nobh_commit_write);
2356 
2357 /*
2358  * nobh_writepage() - based on block_write_full_page() except
2359  * that it tries to operate without attaching bufferheads to
2360  * the page.
2361  */
2362 int nobh_writepage(struct page *page, get_block_t *get_block,
2363 			struct writeback_control *wbc)
2364 {
2365 	struct inode * const inode = page->mapping->host;
2366 	loff_t i_size = i_size_read(inode);
2367 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2368 	unsigned offset;
2369 	int ret;
2370 
2371 	/* Is the page fully inside i_size? */
2372 	if (page->index < end_index)
2373 		goto out;
2374 
2375 	/* Is the page fully outside i_size? (truncate in progress) */
2376 	offset = i_size & (PAGE_CACHE_SIZE-1);
2377 	if (page->index >= end_index+1 || !offset) {
2378 		/*
2379 		 * The page may have dirty, unmapped buffers.  For example,
2380 		 * they may have been added in ext3_writepage().  Make them
2381 		 * freeable here, so the page does not leak.
2382 		 */
2383 #if 0
2384 		/* Not really sure about this - do we need this? */
2385 		if (page->mapping->a_ops->invalidatepage)
2386 			page->mapping->a_ops->invalidatepage(page, offset);
2387 #endif
2388 		unlock_page(page);
2389 		return 0; /* don't care */
2390 	}
2391 
2392 	/*
2393 	 * The page straddles i_size.  It must be zeroed out on each and every
2394 	 * writepage invocation because it may be mmapped.  "A file is mapped
2395 	 * in multiples of the page size.  For a file that is not a multiple of
2396 	 * the  page size, the remaining memory is zeroed when mapped, and
2397 	 * writes to that region are not written out to the file."
2398 	 */
2399 	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
2400 out:
2401 	ret = mpage_writepage(page, get_block, wbc);
2402 	if (ret == -EAGAIN)
2403 		ret = __block_write_full_page(inode, page, get_block, wbc);
2404 	return ret;
2405 }
2406 EXPORT_SYMBOL(nobh_writepage);
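/*
 * Illustrative sketch, not part of the original file: a filesystem opting
 * into the "nobh" scheme typically wires the nobh helpers together so that
 * pages are written back without permanently attached buffer_heads.  Names
 * prefixed "examplefs_" are hypothetical.
 *
 *	static int examplefs_nobh_prepare_write(struct file *file,
 *			struct page *page, unsigned from, unsigned to)
 *	{
 *		return nobh_prepare_write(page, from, to, examplefs_get_block);
 *	}
 *
 *	static int examplefs_nobh_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return nobh_writepage(page, examplefs_get_block, wbc);
 *	}
 *
 *	static const struct address_space_operations examplefs_nobh_aops = {
 *		.readpage	= examplefs_readpage,
 *		.writepage	= examplefs_nobh_writepage,
 *		.prepare_write	= examplefs_nobh_prepare_write,
 *		.commit_write	= nobh_commit_write,
 *	};
 */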
2407 
2408 /*
2409  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2410  */
2411 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2412 {
2413 	struct inode *inode = mapping->host;
2414 	unsigned blocksize = 1 << inode->i_blkbits;
2415 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2416 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2417 	unsigned to;
2418 	struct page *page;
2419 	const struct address_space_operations *a_ops = mapping->a_ops;
2420 	int ret = 0;
2421 
2422 	if ((offset & (blocksize - 1)) == 0)
2423 		goto out;
2424 
2425 	ret = -ENOMEM;
2426 	page = grab_cache_page(mapping, index);
2427 	if (!page)
2428 		goto out;
2429 
2430 	to = (offset + blocksize) & ~(blocksize - 1);
2431 	ret = a_ops->prepare_write(NULL, page, offset, to);
2432 	if (ret == 0) {
2433 		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
2434 				KM_USER0);
2435 		/*
2436 		 * It would be more correct to call aops->commit_write()
2437 		 * here, but this is more efficient.
2438 		 */
2439 		SetPageUptodate(page);
2440 		set_page_dirty(page);
2441 	}
2442 	unlock_page(page);
2443 	page_cache_release(page);
2444 out:
2445 	return ret;
2446 }
2447 EXPORT_SYMBOL(nobh_truncate_page);
2448 
2449 int block_truncate_page(struct address_space *mapping,
2450 			loff_t from, get_block_t *get_block)
2451 {
2452 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2453 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2454 	unsigned blocksize;
2455 	sector_t iblock;
2456 	unsigned length, pos;
2457 	struct inode *inode = mapping->host;
2458 	struct page *page;
2459 	struct buffer_head *bh;
2460 	int err;
2461 
2462 	blocksize = 1 << inode->i_blkbits;
2463 	length = offset & (blocksize - 1);
2464 
2465 	/* Block boundary? Nothing to do */
2466 	if (!length)
2467 		return 0;
2468 
2469 	length = blocksize - length;
2470 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2471 
2472 	page = grab_cache_page(mapping, index);
2473 	err = -ENOMEM;
2474 	if (!page)
2475 		goto out;
2476 
2477 	if (!page_has_buffers(page))
2478 		create_empty_buffers(page, blocksize, 0);
2479 
2480 	/* Find the buffer that contains "offset" */
2481 	bh = page_buffers(page);
2482 	pos = blocksize;
2483 	while (offset >= pos) {
2484 		bh = bh->b_this_page;
2485 		iblock++;
2486 		pos += blocksize;
2487 	}
2488 
2489 	err = 0;
2490 	if (!buffer_mapped(bh)) {
2491 		WARN_ON(bh->b_size != blocksize);
2492 		err = get_block(inode, iblock, bh, 0);
2493 		if (err)
2494 			goto unlock;
2495 		/* unmapped? It's a hole - nothing to do */
2496 		if (!buffer_mapped(bh))
2497 			goto unlock;
2498 	}
2499 
2500 	/* Ok, it's mapped. Make sure it's up-to-date */
2501 	if (PageUptodate(page))
2502 		set_buffer_uptodate(bh);
2503 
2504 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2505 		err = -EIO;
2506 		ll_rw_block(READ, 1, &bh);
2507 		wait_on_buffer(bh);
2508 		/* Uhhuh. Read error. Complain and punt. */
2509 		if (!buffer_uptodate(bh))
2510 			goto unlock;
2511 	}
2512 
2513 	zero_user_page(page, offset, length, KM_USER0);
2514 	mark_buffer_dirty(bh);
2515 	err = 0;
2516 
2517 unlock:
2518 	unlock_page(page);
2519 	page_cache_release(page);
2520 out:
2521 	return err;
2522 }
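/*
 * Illustrative sketch, not part of the original file: a filesystem truncate
 * path usually zeroes the partial block at the new end of file with
 * block_truncate_page() before trimming its block mappings, so stale data in
 * the final block is never exposed.  "examplefs_get_block" and
 * "examplefs_free_blocks" are hypothetical.
 *
 *	static void examplefs_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *				    examplefs_get_block);
 *		examplefs_free_blocks(inode, inode->i_size);
 *	}
 */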
2523 
2524 /*
2525  * The generic ->writepage function for buffer-backed address_spaces
2526  */
2527 int block_write_full_page(struct page *page, get_block_t *get_block,
2528 			struct writeback_control *wbc)
2529 {
2530 	struct inode * const inode = page->mapping->host;
2531 	loff_t i_size = i_size_read(inode);
2532 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2533 	unsigned offset;
2534 
2535 	/* Is the page fully inside i_size? */
2536 	if (page->index < end_index)
2537 		return __block_write_full_page(inode, page, get_block, wbc);
2538 
2539 	/* Is the page fully outside i_size? (truncate in progress) */
2540 	offset = i_size & (PAGE_CACHE_SIZE-1);
2541 	if (page->index >= end_index+1 || !offset) {
2542 		/*
2543 		 * The page may have dirty, unmapped buffers.  For example,
2544 		 * they may have been added in ext3_writepage().  Make them
2545 		 * freeable here, so the page does not leak.
2546 		 */
2547 		do_invalidatepage(page, 0);
2548 		unlock_page(page);
2549 		return 0; /* don't care */
2550 	}
2551 
2552 	/*
2553 	 * The page straddles i_size.  It must be zeroed out on each and every
2554 	 * writepage invocation because it may be mmapped.  "A file is mapped
2555 	 * in multiples of the page size.  For a file that is not a multiple of
2556 	 * the  page size, the remaining memory is zeroed when mapped, and
2557 	 * writes to that region are not written out to the file."
2558 	 */
2559 	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
2560 	return __block_write_full_page(inode, page, get_block, wbc);
2561 }
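/*
 * Illustrative sketch, not part of the original file: the usual ->writepage
 * for a buffer-backed filesystem is a one-line wrapper around
 * block_write_full_page().  "examplefs_writepage" and "examplefs_get_block"
 * are hypothetical.
 *
 *	static int examplefs_writepage(struct page *page,
 *				       struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, examplefs_get_block, wbc);
 *	}
 */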
2562 
2563 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2564 			    get_block_t *get_block)
2565 {
2566 	struct buffer_head tmp;
2567 	struct inode *inode = mapping->host;
2568 	tmp.b_state = 0;
2569 	tmp.b_blocknr = 0;
2570 	tmp.b_size = 1 << inode->i_blkbits;
2571 	get_block(inode, block, &tmp, 0);
2572 	return tmp.b_blocknr;
2573 }
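/*
 * Illustrative sketch, not part of the original file: ->bmap (used by the
 * FIBMAP ioctl and by swap-over-file setup) is normally just
 * generic_block_bmap() with the filesystem's get_block callback.  Names
 * prefixed "examplefs_" are hypothetical.
 *
 *	static sector_t examplefs_bmap(struct address_space *mapping,
 *				       sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, examplefs_get_block);
 *	}
 */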
2574 
2575 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2576 {
2577 	struct buffer_head *bh = bio->bi_private;
2578 
2579 	if (bio->bi_size)
2580 		return 1;
2581 
2582 	if (err == -EOPNOTSUPP) {
2583 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2584 		set_bit(BH_Eopnotsupp, &bh->b_state);
2585 	}
2586 
2587 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2588 	bio_put(bio);
2589 	return 0;
2590 }
2591 
2592 int submit_bh(int rw, struct buffer_head * bh)
2593 {
2594 	struct bio *bio;
2595 	int ret = 0;
2596 
2597 	BUG_ON(!buffer_locked(bh));
2598 	BUG_ON(!buffer_mapped(bh));
2599 	BUG_ON(!bh->b_end_io);
2600 
2601 	if (buffer_ordered(bh) && (rw == WRITE))
2602 		rw = WRITE_BARRIER;
2603 
2604 	/*
2605 	 * Only clear out a write error when rewriting.  Should this
2606 	 * include WRITE_SYNC as well?
2607 	 */
2608 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2609 		clear_buffer_write_io_error(bh);
2610 
2611 	/*
2612 	 * from here on down, it's all bio -- do the initial mapping,
2613 	 * submit_bio -> generic_make_request may further map this bio around
2614 	 */
2615 	bio = bio_alloc(GFP_NOIO, 1);
2616 
2617 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2618 	bio->bi_bdev = bh->b_bdev;
2619 	bio->bi_io_vec[0].bv_page = bh->b_page;
2620 	bio->bi_io_vec[0].bv_len = bh->b_size;
2621 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2622 
2623 	bio->bi_vcnt = 1;
2624 	bio->bi_idx = 0;
2625 	bio->bi_size = bh->b_size;
2626 
2627 	bio->bi_end_io = end_bio_bh_io_sync;
2628 	bio->bi_private = bh;
2629 
2630 	bio_get(bio);
2631 	submit_bio(rw, bio);
2632 
2633 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2634 		ret = -EOPNOTSUPP;
2635 
2636 	bio_put(bio);
2637 	return ret;
2638 }
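/*
 * Illustrative sketch, not part of the original file: a caller that wants a
 * synchronous read of a single mapped buffer can lock it, submit it and then
 * wait, roughly the pattern __bread() uses internally.  This assumes the
 * caller already holds a reference on a mapped buffer_head;
 * end_buffer_read_sync() drops the extra reference taken here.
 *
 *	static int example_read_bh_sync(struct buffer_head *bh)
 *	{
 *		lock_buffer(bh);
 *		if (buffer_uptodate(bh)) {
 *			unlock_buffer(bh);
 *			return 0;
 *		}
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *		return buffer_uptodate(bh) ? 0 : -EIO;
 *	}
 */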
2639 
2640 /**
2641  * ll_rw_block: low-level access to block devices (DEPRECATED)
2642  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2643  * @nr: number of &struct buffer_heads in the array
2644  * @bhs: array of pointers to &struct buffer_head
2645  *
2646  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2647  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2648  * %SWRITE is like %WRITE, except that we make sure that the *current* data
2649  * in the buffers is sent to disk.  The fourth %READA option is described in
2650  * the documentation for generic_make_request(), which ll_rw_block() calls.
2651  *
2652  * This function drops any buffer that it cannot get a lock on (with the
2653  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2654  * clean when doing a write request, and any buffer that appears to be
2655  * up-to-date when doing a read request.  Further, it marks as clean buffers that
2656  * are processed for writing (the buffer cache won't assume that they are
2657  * actually clean until the buffer gets unlocked).
2658  *
2659  * ll_rw_block sets b_end_io to a simple completion handler that marks
2660  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2661  * any waiters.
2662  *
2663  * All of the buffers must be for the same device, and must also be a
2664  * multiple of the current approved size for the device.
2665  */
2666 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2667 {
2668 	int i;
2669 
2670 	for (i = 0; i < nr; i++) {
2671 		struct buffer_head *bh = bhs[i];
2672 
2673 		if (rw == SWRITE)
2674 			lock_buffer(bh);
2675 		else if (test_set_buffer_locked(bh))
2676 			continue;
2677 
2678 		if (rw == WRITE || rw == SWRITE) {
2679 			if (test_clear_buffer_dirty(bh)) {
2680 				bh->b_end_io = end_buffer_write_sync;
2681 				get_bh(bh);
2682 				submit_bh(WRITE, bh);
2683 				continue;
2684 			}
2685 		} else {
2686 			if (!buffer_uptodate(bh)) {
2687 				bh->b_end_io = end_buffer_read_sync;
2688 				get_bh(bh);
2689 				submit_bh(rw, bh);
2690 				continue;
2691 			}
2692 		}
2693 		unlock_buffer(bh);
2694 	}
2695 }
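/*
 * Illustrative sketch, not part of the original file: a typical use is to
 * kick off readahead on a batch of metadata buffers and only wait on the one
 * that is needed immediately, e.g.:
 *
 *	ll_rw_block(READA, nr - 1, bhs + 1);
 *	ll_rw_block(READ, 1, &bhs[0]);
 *	wait_on_buffer(bhs[0]);
 *	if (!buffer_uptodate(bhs[0]))
 *		return -EIO;
 *
 * "bhs" and "nr" are hypothetical.  Callers needing data integrity should
 * prefer sync_dirty_buffer() or submit_bh() with an explicit wait, since
 * ll_rw_block() silently skips buffers it cannot lock.
 */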
2696 
2697 /*
2698  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2699  * and then start new I/O and then wait upon it.  The caller must have a ref on
2700  * the buffer_head.
2701  */
2702 int sync_dirty_buffer(struct buffer_head *bh)
2703 {
2704 	int ret = 0;
2705 
2706 	WARN_ON(atomic_read(&bh->b_count) < 1);
2707 	lock_buffer(bh);
2708 	if (test_clear_buffer_dirty(bh)) {
2709 		get_bh(bh);
2710 		bh->b_end_io = end_buffer_write_sync;
2711 		ret = submit_bh(WRITE, bh);
2712 		wait_on_buffer(bh);
2713 		if (buffer_eopnotsupp(bh)) {
2714 			clear_buffer_eopnotsupp(bh);
2715 			ret = -EOPNOTSUPP;
2716 		}
2717 		if (!ret && !buffer_uptodate(bh))
2718 			ret = -EIO;
2719 	} else {
2720 		unlock_buffer(bh);
2721 	}
2722 	return ret;
2723 }
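/*
 * Illustrative sketch, not part of the original file: metadata-heavy code
 * typically modifies a buffer, marks it dirty and then forces it to disk with
 * sync_dirty_buffer() when write ordering matters, e.g.:
 *
 *	memcpy(bh->b_data, data, bh->b_size);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		printk(KERN_ERR "example: metadata write failed\n");
 *
 * "bh", "data" and "err" are hypothetical; as noted above, the caller must
 * hold a reference on the buffer_head.
 */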
2724 
2725 /*
2726  * try_to_free_buffers() checks if all the buffers on this particular page
2727  * are unused, and releases them if so.
2728  *
2729  * Exclusion against try_to_free_buffers may be obtained by either
2730  * locking the page or by holding its mapping's private_lock.
2731  *
2732  * If the page is dirty but all the buffers are clean then we need to
2733  * be sure to mark the page clean as well.  This is because the page
2734  * may be against a block device, and a later reattachment of buffers
2735  * to a dirty page will set *all* buffers dirty, which would corrupt
2736  * filesystem data on the same device.
2737  *
2738  * The same applies to regular filesystem pages: if all the buffers are
2739  * clean then we set the page clean and proceed.  To do that, we require
2740  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2741  * private_lock.
2742  *
2743  * try_to_free_buffers() is non-blocking.
2744  */
2745 static inline int buffer_busy(struct buffer_head *bh)
2746 {
2747 	return atomic_read(&bh->b_count) |
2748 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2749 }
2750 
2751 static int
2752 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2753 {
2754 	struct buffer_head *head = page_buffers(page);
2755 	struct buffer_head *bh;
2756 
2757 	bh = head;
2758 	do {
2759 		if (buffer_write_io_error(bh) && page->mapping)
2760 			set_bit(AS_EIO, &page->mapping->flags);
2761 		if (buffer_busy(bh))
2762 			goto failed;
2763 		bh = bh->b_this_page;
2764 	} while (bh != head);
2765 
2766 	do {
2767 		struct buffer_head *next = bh->b_this_page;
2768 
2769 		if (!list_empty(&bh->b_assoc_buffers))
2770 			__remove_assoc_queue(bh);
2771 		bh = next;
2772 	} while (bh != head);
2773 	*buffers_to_free = head;
2774 	__clear_page_buffers(page);
2775 	return 1;
2776 failed:
2777 	return 0;
2778 }
2779 
2780 int try_to_free_buffers(struct page *page)
2781 {
2782 	struct address_space * const mapping = page->mapping;
2783 	struct buffer_head *buffers_to_free = NULL;
2784 	int ret = 0;
2785 
2786 	BUG_ON(!PageLocked(page));
2787 	if (PageWriteback(page))
2788 		return 0;
2789 
2790 	if (mapping == NULL) {		/* can this still happen? */
2791 		ret = drop_buffers(page, &buffers_to_free);
2792 		goto out;
2793 	}
2794 
2795 	spin_lock(&mapping->private_lock);
2796 	ret = drop_buffers(page, &buffers_to_free);
2797 
2798 	/*
2799 	 * If the filesystem writes its buffers by hand (eg ext3)
2800 	 * then we can have clean buffers against a dirty page.  We
2801 	 * clean the page here; otherwise the VM will never notice
2802 	 * that the filesystem did any IO at all.
2803 	 *
2804 	 * Also, during truncate, discard_buffer will have marked all
2805 	 * the page's buffers clean.  We discover that here and clean
2806 	 * the page also.
2807 	 *
2808 	 * private_lock must be held over this entire operation in order
2809 	 * to synchronise against __set_page_dirty_buffers and prevent the
2810 	 * dirty bit from being lost.
2811 	 */
2812 	if (ret)
2813 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
2814 	spin_unlock(&mapping->private_lock);
2815 out:
2816 	if (buffers_to_free) {
2817 		struct buffer_head *bh = buffers_to_free;
2818 
2819 		do {
2820 			struct buffer_head *next = bh->b_this_page;
2821 			free_buffer_head(bh);
2822 			bh = next;
2823 		} while (bh != buffers_to_free);
2824 	}
2825 	return ret;
2826 }
2827 EXPORT_SYMBOL(try_to_free_buffers);
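/*
 * Illustrative sketch, not part of the original file: filesystems without
 * special buffer lifetime rules commonly let the VM reclaim buffer_heads by
 * pointing ->releasepage at try_to_free_buffers(), either implicitly through
 * try_to_release_page() or via a trivial wrapper such as the hypothetical
 * one below.
 *
 *	static int examplefs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */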
2828 
2829 void block_sync_page(struct page *page)
2830 {
2831 	struct address_space *mapping;
2832 
2833 	smp_mb();
2834 	mapping = page_mapping(page);
2835 	if (mapping)
2836 		blk_run_backing_dev(mapping->backing_dev_info, page);
2837 }
2838 
2839 /*
2840  * There are no bdflush tunables left.  But distributions are
2841  * still running obsolete flush daemons, so we terminate them here.
2842  *
2843  * Use of bdflush() is deprecated and will be removed in a future kernel.
2844  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2845  */
2846 asmlinkage long sys_bdflush(int func, long data)
2847 {
2848 	static int msg_count;
2849 
2850 	if (!capable(CAP_SYS_ADMIN))
2851 		return -EPERM;
2852 
2853 	if (msg_count < 5) {
2854 		msg_count++;
2855 		printk(KERN_INFO
2856 			"warning: process `%s' used the obsolete bdflush"
2857 			" system call\n", current->comm);
2858 		printk(KERN_INFO "Fix your initscripts?\n");
2859 	}
2860 
2861 	if (func == 1)
2862 		do_exit(0);
2863 	return 0;
2864 }
2865 
2866 /*
2867  * Buffer-head allocation
2868  */
2869 static struct kmem_cache *bh_cachep;
2870 
2871 /*
2872  * Once the number of bh's in the machine exceeds this level, we start
2873  * stripping them in writeback.
2874  */
2875 static int max_buffer_heads;
2876 
2877 int buffer_heads_over_limit;
2878 
2879 struct bh_accounting {
2880 	int nr;			/* Number of live bh's */
2881 	int ratelimit;		/* Limit cacheline bouncing */
2882 };
2883 
2884 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2885 
2886 static void recalc_bh_state(void)
2887 {
2888 	int i;
2889 	int tot = 0;
2890 
2891 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2892 		return;
2893 	__get_cpu_var(bh_accounting).ratelimit = 0;
2894 	for_each_online_cpu(i)
2895 		tot += per_cpu(bh_accounting, i).nr;
2896 	buffer_heads_over_limit = (tot > max_buffer_heads);
2897 }
2898 
2899 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2900 {
2901 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2902 	if (ret) {
2903 		get_cpu_var(bh_accounting).nr++;
2904 		recalc_bh_state();
2905 		put_cpu_var(bh_accounting);
2906 	}
2907 	return ret;
2908 }
2909 EXPORT_SYMBOL(alloc_buffer_head);
2910 
2911 void free_buffer_head(struct buffer_head *bh)
2912 {
2913 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
2914 	kmem_cache_free(bh_cachep, bh);
2915 	get_cpu_var(bh_accounting).nr--;
2916 	recalc_bh_state();
2917 	put_cpu_var(bh_accounting);
2918 }
2919 EXPORT_SYMBOL(free_buffer_head);
2920 
2921 static void
2922 init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
2923 {
2924 	if (flags & SLAB_CTOR_CONSTRUCTOR) {
2925 		struct buffer_head * bh = (struct buffer_head *)data;
2926 
2927 		memset(bh, 0, sizeof(*bh));
2928 		INIT_LIST_HEAD(&bh->b_assoc_buffers);
2929 	}
2930 }
2931 
2932 static void buffer_exit_cpu(int cpu)
2933 {
2934 	int i;
2935 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2936 
2937 	for (i = 0; i < BH_LRU_SIZE; i++) {
2938 		brelse(b->bhs[i]);
2939 		b->bhs[i] = NULL;
2940 	}
2941 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
2942 	per_cpu(bh_accounting, cpu).nr = 0;
2943 	put_cpu_var(bh_accounting);
2944 }
2945 
2946 static int buffer_cpu_notify(struct notifier_block *self,
2947 			      unsigned long action, void *hcpu)
2948 {
2949 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
2950 		buffer_exit_cpu((unsigned long)hcpu);
2951 	return NOTIFY_OK;
2952 }
2953 
2954 void __init buffer_init(void)
2955 {
2956 	int nrpages;
2957 
2958 	bh_cachep = kmem_cache_create("buffer_head",
2959 					sizeof(struct buffer_head), 0,
2960 					(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2961 					SLAB_MEM_SPREAD),
2962 					init_buffer_head,
2963 					NULL);
2964 
2965 	/*
2966 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
2967 	 */
2968 	nrpages = (nr_free_buffer_pages() * 10) / 100;
2969 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
2970 	hotcpu_notifier(buffer_cpu_notify, 0);
2971 }
2972 
2973 EXPORT_SYMBOL(__bforget);
2974 EXPORT_SYMBOL(__brelse);
2975 EXPORT_SYMBOL(__wait_on_buffer);
2976 EXPORT_SYMBOL(block_commit_write);
2977 EXPORT_SYMBOL(block_prepare_write);
2978 EXPORT_SYMBOL(block_read_full_page);
2979 EXPORT_SYMBOL(block_sync_page);
2980 EXPORT_SYMBOL(block_truncate_page);
2981 EXPORT_SYMBOL(block_write_full_page);
2982 EXPORT_SYMBOL(cont_prepare_write);
2983 EXPORT_SYMBOL(end_buffer_read_sync);
2984 EXPORT_SYMBOL(end_buffer_write_sync);
2985 EXPORT_SYMBOL(file_fsync);
2986 EXPORT_SYMBOL(fsync_bdev);
2987 EXPORT_SYMBOL(generic_block_bmap);
2988 EXPORT_SYMBOL(generic_commit_write);
2989 EXPORT_SYMBOL(generic_cont_expand);
2990 EXPORT_SYMBOL(generic_cont_expand_simple);
2991 EXPORT_SYMBOL(init_buffer);
2992 EXPORT_SYMBOL(invalidate_bdev);
2993 EXPORT_SYMBOL(ll_rw_block);
2994 EXPORT_SYMBOL(mark_buffer_dirty);
2995 EXPORT_SYMBOL(submit_bh);
2996 EXPORT_SYMBOL(sync_dirty_buffer);
2997 EXPORT_SYMBOL(unlock_buffer);
2998