xref: /openbmc/linux/fs/buffer.c (revision 721a9602e6607417c6bc15b18e97a2f35266c690)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 EXPORT_SYMBOL(init_buffer);
56 
57 static int sleep_on_buffer(void *word)
58 {
59 	io_schedule();
60 	return 0;
61 }
62 
63 void __lock_buffer(struct buffer_head *bh)
64 {
65 	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
66 							TASK_UNINTERRUPTIBLE);
67 }
68 EXPORT_SYMBOL(__lock_buffer);
69 
70 void unlock_buffer(struct buffer_head *bh)
71 {
72 	clear_bit_unlock(BH_Lock, &bh->b_state);
73 	smp_mb__after_clear_bit();
74 	wake_up_bit(&bh->b_state, BH_Lock);
75 }
76 EXPORT_SYMBOL(unlock_buffer);
77 
78 /*
79  * Block until a buffer comes unlocked.  This doesn't stop it
80  * from becoming locked again - you have to lock it yourself
81  * if you want to preserve its state.
82  */
83 void __wait_on_buffer(struct buffer_head * bh)
84 {
85 	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
86 }
87 EXPORT_SYMBOL(__wait_on_buffer);
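
/*
 * For example, a caller that needs the buffer's state to stay stable
 * while it inspects it must take the lock itself, roughly:
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		... nobody can change the buffer under us here ...
 *	}
 *	unlock_buffer(bh);
 *
 * A bare wait_on_buffer(bh) gives no such guarantee - the buffer may be
 * locked again by somebody else the instant the wait returns.
 */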
88 
89 static void
90 __clear_page_buffers(struct page *page)
91 {
92 	ClearPagePrivate(page);
93 	set_page_private(page, 0);
94 	page_cache_release(page);
95 }
96 
97 
98 static int quiet_error(struct buffer_head *bh)
99 {
100 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
101 		return 0;
102 	return 1;
103 }
104 
105 
106 static void buffer_io_error(struct buffer_head *bh)
107 {
108 	char b[BDEVNAME_SIZE];
109 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
110 			bdevname(bh->b_bdev, b),
111 			(unsigned long long)bh->b_blocknr);
112 }
113 
114 /*
115  * End-of-IO handler helper function which does not touch the bh after
116  * unlocking it.
117  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
117  * a race there is benign: unlock_buffer() only uses the bh's address for
119  * hashing after unlocking the buffer, so it doesn't actually touch the bh
120  * itself.
121  */
122 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
123 {
124 	if (uptodate) {
125 		set_buffer_uptodate(bh);
126 	} else {
127 		/* This happens, due to failed READA attempts. */
128 		clear_buffer_uptodate(bh);
129 	}
130 	unlock_buffer(bh);
131 }
132 
133 /*
134  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
135  * unlock the buffer. This is what ll_rw_block uses too.
136  */
137 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
138 {
139 	__end_buffer_read_notouch(bh, uptodate);
140 	put_bh(bh);
141 }
142 EXPORT_SYMBOL(end_buffer_read_sync);
143 
144 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
145 {
146 	char b[BDEVNAME_SIZE];
147 
148 	if (uptodate) {
149 		set_buffer_uptodate(bh);
150 	} else {
151 		if (!quiet_error(bh)) {
152 			buffer_io_error(bh);
153 			printk(KERN_WARNING "lost page write due to "
154 					"I/O error on %s\n",
155 				       bdevname(bh->b_bdev, b));
156 		}
157 		set_buffer_write_io_error(bh);
158 		clear_buffer_uptodate(bh);
159 	}
160 	unlock_buffer(bh);
161 	put_bh(bh);
162 }
163 EXPORT_SYMBOL(end_buffer_write_sync);
164 
165 /*
166  * Various filesystems appear to want __find_get_block to be non-blocking.
167  * But it's the page lock which protects the buffers.  To get around this,
168  * we get exclusion from try_to_free_buffers with the blockdev mapping's
169  * private_lock.
170  *
171  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
172  * may be quite high.  This code could TryLock the page, and if that
173  * succeeds, there is no need to take private_lock. (But if
174  * private_lock is contended then so is mapping->tree_lock).
175  */
176 static struct buffer_head *
177 __find_get_block_slow(struct block_device *bdev, sector_t block)
178 {
179 	struct inode *bd_inode = bdev->bd_inode;
180 	struct address_space *bd_mapping = bd_inode->i_mapping;
181 	struct buffer_head *ret = NULL;
182 	pgoff_t index;
183 	struct buffer_head *bh;
184 	struct buffer_head *head;
185 	struct page *page;
186 	int all_mapped = 1;
187 
188 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
189 	page = find_get_page(bd_mapping, index);
190 	if (!page)
191 		goto out;
192 
193 	spin_lock(&bd_mapping->private_lock);
194 	if (!page_has_buffers(page))
195 		goto out_unlock;
196 	head = page_buffers(page);
197 	bh = head;
198 	do {
199 		if (!buffer_mapped(bh))
200 			all_mapped = 0;
201 		else if (bh->b_blocknr == block) {
202 			ret = bh;
203 			get_bh(bh);
204 			goto out_unlock;
205 		}
206 		bh = bh->b_this_page;
207 	} while (bh != head);
208 
209 	/* we might be here because some of the buffers on this page are
210 	 * not mapped.  This is due to various races between
211 	 * file io on the block device and getblk.  It gets dealt with
212 	 * elsewhere, don't buffer_error if we had some unmapped buffers
213 	 */
214 	if (all_mapped) {
215 		printk("__find_get_block_slow() failed. "
216 			"block=%llu, b_blocknr=%llu\n",
217 			(unsigned long long)block,
218 			(unsigned long long)bh->b_blocknr);
219 		printk("b_state=0x%08lx, b_size=%zu\n",
220 			bh->b_state, bh->b_size);
221 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
222 	}
223 out_unlock:
224 	spin_unlock(&bd_mapping->private_lock);
225 	page_cache_release(page);
226 out:
227 	return ret;
228 }
229 
230 /* If invalidate_buffers() will trash dirty buffers, it means some kind
231    of fs corruption is going on. Trashing dirty data always implies losing
232    information that was supposed to be just stored on the physical layer
233    by the user.
234 
235    Thus invalidate_buffers in general usage is not allowed to trash
236    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
237    be preserved.  These buffers are simply skipped.
238 
239    We also skip buffers which are still in use.  For example this can
240    happen if a userspace program is reading the block device.
241 
242    NOTE: if the user removes a removable-media disk while there is still
243    dirty data that has not been synced to disk (due to a bug in the device
244    driver, or to an error by the user), then by not destroying the dirty
245    buffers we could also corrupt the next media inserted; a parameter is
246    therefore necessary to handle this case in the safest way possible
247    (trying not to corrupt the newly inserted disk with data belonging to
248    the old, now-corrupted one). For the ramdisk, on the other hand, the
249    natural way to release its memory is to destroy the dirty buffers.
250 
251    These are two special cases. Normal usage implies that the device driver
252    issues a sync on the device (without waiting for I/O completion) and
253    then makes an invalidate_buffers call that doesn't trash dirty buffers.
254 
255    For handling cache coherency with the blkdev pagecache, the 'update' case
256    has been introduced. It is needed to re-read from disk any pinned
257    buffer. NOTE: re-reading from disk is destructive, so we can do it only
258    when we assume nobody is changing the buffercache under our I/O and when
259    we think the disk contains more recent information than the buffercache.
260    The update == 1 pass marks the buffers we need to update, the update == 2
261    pass does the actual I/O. */
262 void invalidate_bdev(struct block_device *bdev)
263 {
264 	struct address_space *mapping = bdev->bd_inode->i_mapping;
265 
266 	if (mapping->nrpages == 0)
267 		return;
268 
269 	invalidate_bh_lrus();
270 	lru_add_drain_all();	/* make sure all lru add caches are flushed */
271 	invalidate_mapping_pages(mapping, 0, -1);
272 }
273 EXPORT_SYMBOL(invalidate_bdev);
274 
275 /*
276  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
277  */
278 static void free_more_memory(void)
279 {
280 	struct zone *zone;
281 	int nid;
282 
283 	wakeup_flusher_threads(1024);
284 	yield();
285 
286 	for_each_online_node(nid) {
287 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
288 						gfp_zone(GFP_NOFS), NULL,
289 						&zone);
290 		if (zone)
291 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
292 						GFP_NOFS, NULL);
293 	}
294 }
295 
296 /*
297  * I/O completion handler for block_read_full_page() - pages
298  * which come unlocked at the end of I/O.
299  */
300 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
301 {
302 	unsigned long flags;
303 	struct buffer_head *first;
304 	struct buffer_head *tmp;
305 	struct page *page;
306 	int page_uptodate = 1;
307 
308 	BUG_ON(!buffer_async_read(bh));
309 
310 	page = bh->b_page;
311 	if (uptodate) {
312 		set_buffer_uptodate(bh);
313 	} else {
314 		clear_buffer_uptodate(bh);
315 		if (!quiet_error(bh))
316 			buffer_io_error(bh);
317 		SetPageError(page);
318 	}
319 
320 	/*
321 	 * Be _very_ careful from here on. Bad things can happen if
322 	 * two buffer heads end IO at almost the same time and both
323 	 * decide that the page is now completely done.
324 	 */
325 	first = page_buffers(page);
326 	local_irq_save(flags);
327 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
328 	clear_buffer_async_read(bh);
329 	unlock_buffer(bh);
330 	tmp = bh;
331 	do {
332 		if (!buffer_uptodate(tmp))
333 			page_uptodate = 0;
334 		if (buffer_async_read(tmp)) {
335 			BUG_ON(!buffer_locked(tmp));
336 			goto still_busy;
337 		}
338 		tmp = tmp->b_this_page;
339 	} while (tmp != bh);
340 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
341 	local_irq_restore(flags);
342 
343 	/*
344 	 * If none of the buffers had errors and they are all
345 	 * uptodate then we can set the page uptodate.
346 	 */
347 	if (page_uptodate && !PageError(page))
348 		SetPageUptodate(page);
349 	unlock_page(page);
350 	return;
351 
352 still_busy:
353 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
354 	local_irq_restore(flags);
355 	return;
356 }
357 
358 /*
359  * Completion handler for block_write_full_page() - pages which are unlocked
360  * during I/O, and which have PageWriteback cleared upon I/O completion.
361  */
362 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
363 {
364 	char b[BDEVNAME_SIZE];
365 	unsigned long flags;
366 	struct buffer_head *first;
367 	struct buffer_head *tmp;
368 	struct page *page;
369 
370 	BUG_ON(!buffer_async_write(bh));
371 
372 	page = bh->b_page;
373 	if (uptodate) {
374 		set_buffer_uptodate(bh);
375 	} else {
376 		if (!quiet_error(bh)) {
377 			buffer_io_error(bh);
378 			printk(KERN_WARNING "lost page write due to "
379 					"I/O error on %s\n",
380 			       bdevname(bh->b_bdev, b));
381 		}
382 		set_bit(AS_EIO, &page->mapping->flags);
383 		set_buffer_write_io_error(bh);
384 		clear_buffer_uptodate(bh);
385 		SetPageError(page);
386 	}
387 
388 	first = page_buffers(page);
389 	local_irq_save(flags);
390 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
391 
392 	clear_buffer_async_write(bh);
393 	unlock_buffer(bh);
394 	tmp = bh->b_this_page;
395 	while (tmp != bh) {
396 		if (buffer_async_write(tmp)) {
397 			BUG_ON(!buffer_locked(tmp));
398 			goto still_busy;
399 		}
400 		tmp = tmp->b_this_page;
401 	}
402 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
403 	local_irq_restore(flags);
404 	end_page_writeback(page);
405 	return;
406 
407 still_busy:
408 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
409 	local_irq_restore(flags);
410 	return;
411 }
412 EXPORT_SYMBOL(end_buffer_async_write);
413 
414 /*
415  * If a page's buffers are under async read (end_buffer_async_read
416  * completion) then there is a possibility that another thread of
417  * control could lock one of the buffers after it has completed
418  * but while some of the other buffers have not completed.  This
419  * locked buffer would confuse end_buffer_async_read() into not unlocking
420  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
421  * that this buffer is not under async I/O.
422  *
423  * The page comes unlocked when it has no locked buffer_async buffers
424  * left.
425  *
426  * PageLocked prevents anyone from starting new async I/O against any of
427  * the buffers.
428  *
429  * PageWriteback is used to prevent simultaneous writeout of the same
430  * page.
431  *
432  * PageLocked prevents anyone from starting writeback of a page which is
433  * under read I/O (PageWriteback is only ever set against a locked page).
434  */
435 static void mark_buffer_async_read(struct buffer_head *bh)
436 {
437 	bh->b_end_io = end_buffer_async_read;
438 	set_buffer_async_read(bh);
439 }
440 
441 static void mark_buffer_async_write_endio(struct buffer_head *bh,
442 					  bh_end_io_t *handler)
443 {
444 	bh->b_end_io = handler;
445 	set_buffer_async_write(bh);
446 }
447 
448 void mark_buffer_async_write(struct buffer_head *bh)
449 {
450 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
451 }
452 EXPORT_SYMBOL(mark_buffer_async_write);
453 
454 
455 /*
456  * fs/buffer.c contains helper functions for buffer-backed address space's
457  * fsync functions.  A common requirement for buffer-based filesystems is
458  * that certain data from the backing blockdev needs to be written out for
459  * a successful fsync().  For example, ext2 indirect blocks need to be
460  * written back and waited upon before fsync() returns.
461  *
462  * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
463  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
464  * management of a list of dependent buffers at ->i_mapping->private_list.
465  *
466  * Locking is a little subtle: try_to_free_buffers() will remove buffers
467  * from their controlling inode's queue when they are being freed.  But
468  * try_to_free_buffers() will be operating against the *blockdev* mapping
469  * at the time, not against the S_ISREG file which depends on those buffers.
470  * So the locking for private_list is via the private_lock in the address_space
471  * which backs the buffers.  Which is different from the address_space
472  * against which the buffers are listed.  So for a particular address_space,
473  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
474  * mapping->private_list will always be protected by the backing blockdev's
475  * ->private_lock.
476  *
477  * Which introduces a requirement: all buffers on an address_space's
478  * ->private_list must be from the same address_space: the blockdev's.
479  *
480  * address_spaces which do not place buffers at ->private_list via these
481  * utility functions are free to use private_lock and private_list for
482  * whatever they want.  The only requirement is that list_empty(private_list)
483  * be true at clear_inode() time.
484  *
485  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
486  * filesystems should do that.  invalidate_inode_buffers() should just go
487  * BUG_ON(!list_empty).
488  *
489  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
490  * take an address_space, not an inode.  And it should be called
491  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
492  * queued up.
493  *
494  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
495  * list if it is already on a list.  Because if the buffer is on a list,
496  * it *must* already be on the right one.  If not, the filesystem is being
497  * silly.  This will save a ton of locking.  But first we have to ensure
498  * that buffers are taken *off* the old inode's list when they are freed
499  * (presumably in truncate).  That requires careful auditing of all
500  * filesystems (do it inside bforget()).  It could also be done by bringing
501  * b_inode back.
502  */
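
/*
 * A rough sketch of how a simple filesystem uses these helpers: when a
 * metadata block (say, an indirect block) is modified on behalf of an
 * inode, it is queued on that inode's ->private_list, and fsync() later
 * writes the whole list out:
 *
 *	static void example_dirty_indirect(struct inode *inode,
 *					   struct buffer_head *bh)
 *	{
 *		mark_buffer_dirty_inode(bh, inode);
 *	}
 *
 *	static int example_fsync_metadata(struct inode *inode)
 *	{
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 *
 * (example_* are illustrative names only; real filesystems usually get
 * the second half for free via generic_file_fsync().)
 */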
503 
504 /*
505  * The buffer's backing address_space's private_lock must be held
506  */
507 static void __remove_assoc_queue(struct buffer_head *bh)
508 {
509 	list_del_init(&bh->b_assoc_buffers);
510 	WARN_ON(!bh->b_assoc_map);
511 	if (buffer_write_io_error(bh))
512 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
513 	bh->b_assoc_map = NULL;
514 }
515 
516 int inode_has_buffers(struct inode *inode)
517 {
518 	return !list_empty(&inode->i_data.private_list);
519 }
520 
521 /*
522  * osync is designed to support O_SYNC io.  It waits synchronously for
523  * all already-submitted IO to complete, but does not queue any new
524  * writes to the disk.
525  *
526  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
527  * you dirty the buffers, and then use osync_inode_buffers to wait for
528  * completion.  Any other dirty buffers which are not yet queued for
529  * write will not be flushed to disk by the osync.
530  */
531 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
532 {
533 	struct buffer_head *bh;
534 	struct list_head *p;
535 	int err = 0;
536 
537 	spin_lock(lock);
538 repeat:
539 	list_for_each_prev(p, list) {
540 		bh = BH_ENTRY(p);
541 		if (buffer_locked(bh)) {
542 			get_bh(bh);
543 			spin_unlock(lock);
544 			wait_on_buffer(bh);
545 			if (!buffer_uptodate(bh))
546 				err = -EIO;
547 			brelse(bh);
548 			spin_lock(lock);
549 			goto repeat;
550 		}
551 	}
552 	spin_unlock(lock);
553 	return err;
554 }
555 
556 static void do_thaw_one(struct super_block *sb, void *unused)
557 {
558 	char b[BDEVNAME_SIZE];
559 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
560 		printk(KERN_WARNING "Emergency Thaw on %s\n",
561 		       bdevname(sb->s_bdev, b));
562 }
563 
564 static void do_thaw_all(struct work_struct *work)
565 {
566 	iterate_supers(do_thaw_one, NULL);
567 	kfree(work);
568 	printk(KERN_WARNING "Emergency Thaw complete\n");
569 }
570 
571 /**
572  * emergency_thaw_all -- forcibly thaw every frozen filesystem
573  *
574  * Used for emergency unfreeze of all filesystems via SysRq
575  */
576 void emergency_thaw_all(void)
577 {
578 	struct work_struct *work;
579 
580 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
581 	if (work) {
582 		INIT_WORK(work, do_thaw_all);
583 		schedule_work(work);
584 	}
585 }
586 
587 /**
588  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
589  * @mapping: the mapping which wants those buffers written
590  *
591  * Starts I/O against the buffers at mapping->private_list, and waits upon
592  * that I/O.
593  *
594  * Basically, this is a convenience function for fsync().
595  * @mapping is a file or directory which needs those buffers to be written for
596  * a successful fsync().
597  */
598 int sync_mapping_buffers(struct address_space *mapping)
599 {
600 	struct address_space *buffer_mapping = mapping->assoc_mapping;
601 
602 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
603 		return 0;
604 
605 	return fsync_buffers_list(&buffer_mapping->private_lock,
606 					&mapping->private_list);
607 }
608 EXPORT_SYMBOL(sync_mapping_buffers);
609 
610 /*
611  * Called when we've recently written block `bblock', and it is known that
612  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
613  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
614  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
615  */
616 void write_boundary_block(struct block_device *bdev,
617 			sector_t bblock, unsigned blocksize)
618 {
619 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
620 	if (bh) {
621 		if (buffer_dirty(bh))
622 			ll_rw_block(WRITE, 1, &bh);
623 		put_bh(bh);
624 	}
625 }
626 
627 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
628 {
629 	struct address_space *mapping = inode->i_mapping;
630 	struct address_space *buffer_mapping = bh->b_page->mapping;
631 
632 	mark_buffer_dirty(bh);
633 	if (!mapping->assoc_mapping) {
634 		mapping->assoc_mapping = buffer_mapping;
635 	} else {
636 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
637 	}
638 	if (!bh->b_assoc_map) {
639 		spin_lock(&buffer_mapping->private_lock);
640 		list_move_tail(&bh->b_assoc_buffers,
641 				&mapping->private_list);
642 		bh->b_assoc_map = mapping;
643 		spin_unlock(&buffer_mapping->private_lock);
644 	}
645 }
646 EXPORT_SYMBOL(mark_buffer_dirty_inode);
647 
648 /*
649  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
650  * dirty.
651  *
652  * If warn is true, then emit a warning if the page is not uptodate and has
653  * not been truncated.
654  */
655 static void __set_page_dirty(struct page *page,
656 		struct address_space *mapping, int warn)
657 {
658 	spin_lock_irq(&mapping->tree_lock);
659 	if (page->mapping) {	/* Race with truncate? */
660 		WARN_ON_ONCE(warn && !PageUptodate(page));
661 		account_page_dirtied(page, mapping);
662 		radix_tree_tag_set(&mapping->page_tree,
663 				page_index(page), PAGECACHE_TAG_DIRTY);
664 	}
665 	spin_unlock_irq(&mapping->tree_lock);
666 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
667 }
668 
669 /*
670  * Add a page to the dirty page list.
671  *
672  * It is a sad fact of life that this function is called from several places
673  * deeply under spinlocking.  It may not sleep.
674  *
675  * If the page has buffers, the uptodate buffers are set dirty, to preserve
676  * dirty-state coherency between the page and the buffers.  If the page does
677  * not have buffers then when they are later attached they will all be set
678  * dirty.
679  *
680  * The buffers are dirtied before the page is dirtied.  There's a small race
681  * window in which a writepage caller may see the page cleanness but not the
682  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
683  * before the buffers, a concurrent writepage caller could clear the page dirty
684  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
685  * page on the dirty page list.
686  *
687  * We use private_lock to lock against try_to_free_buffers while using the
688  * page's buffer list.  Also use this to protect against clean buffers being
689  * added to the page after it was set dirty.
690  *
691  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
692  * address_space though.
693  */
694 int __set_page_dirty_buffers(struct page *page)
695 {
696 	int newly_dirty;
697 	struct address_space *mapping = page_mapping(page);
698 
699 	if (unlikely(!mapping))
700 		return !TestSetPageDirty(page);
701 
702 	spin_lock(&mapping->private_lock);
703 	if (page_has_buffers(page)) {
704 		struct buffer_head *head = page_buffers(page);
705 		struct buffer_head *bh = head;
706 
707 		do {
708 			set_buffer_dirty(bh);
709 			bh = bh->b_this_page;
710 		} while (bh != head);
711 	}
712 	newly_dirty = !TestSetPageDirty(page);
713 	spin_unlock(&mapping->private_lock);
714 
715 	if (newly_dirty)
716 		__set_page_dirty(page, mapping, 1);
717 	return newly_dirty;
718 }
719 EXPORT_SYMBOL(__set_page_dirty_buffers);
720 
721 /*
722  * Write out and wait upon a list of buffers.
723  *
724  * We have conflicting pressures: we want to make sure that all
725  * initially dirty buffers get waited on, but that any subsequently
726  * dirtied buffers don't.  After all, we don't want fsync to last
727  * forever if somebody is actively writing to the file.
728  *
729  * Do this in two main stages: first we copy dirty buffers to a
730  * temporary inode list, queueing the writes as we go.  Then we clean
731  * up, waiting for those writes to complete.
732  *
733  * During this second stage, any subsequent updates to the file may end
734  * up refiling the buffer on the original inode's dirty list again, so
735  * there is a chance we will end up with a buffer queued for write but
736  * not yet completed on that list.  So, as a final cleanup we go through
737  * the osync code to catch these locked, dirty buffers without requeuing
738  * any newly dirty buffers for write.
739  */
740 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
741 {
742 	struct buffer_head *bh;
743 	struct list_head tmp;
744 	struct address_space *mapping;
745 	int err = 0, err2;
746 
747 	INIT_LIST_HEAD(&tmp);
748 
749 	spin_lock(lock);
750 	while (!list_empty(list)) {
751 		bh = BH_ENTRY(list->next);
752 		mapping = bh->b_assoc_map;
753 		__remove_assoc_queue(bh);
754 		/* Avoid race with mark_buffer_dirty_inode() which does
755 		 * a lockless check and we rely on seeing the dirty bit */
756 		smp_mb();
757 		if (buffer_dirty(bh) || buffer_locked(bh)) {
758 			list_add(&bh->b_assoc_buffers, &tmp);
759 			bh->b_assoc_map = mapping;
760 			if (buffer_dirty(bh)) {
761 				get_bh(bh);
762 				spin_unlock(lock);
763 				/*
764 				 * Ensure any pending I/O completes so that
765 				 * write_dirty_buffer() actually writes the
766 				 * current contents - it is a noop if I/O is
767 				 * still in flight on potentially older
768 				 * contents.
769 				 */
770 				write_dirty_buffer(bh, WRITE_SYNC);
771 
772 				/*
773 				 * Kick off IO for the previous mapping. Note
774 				 * that we will not run the very last mapping,
775 				 * wait_on_buffer() will do that for us
776 				 * through sync_buffer().
777 				 */
778 				brelse(bh);
779 				spin_lock(lock);
780 			}
781 		}
782 	}
783 
784 	while (!list_empty(&tmp)) {
785 		bh = BH_ENTRY(tmp.prev);
786 		get_bh(bh);
787 		mapping = bh->b_assoc_map;
788 		__remove_assoc_queue(bh);
789 		/* Avoid race with mark_buffer_dirty_inode() which does
790 		 * a lockless check and we rely on seeing the dirty bit */
791 		smp_mb();
792 		if (buffer_dirty(bh)) {
793 			list_add(&bh->b_assoc_buffers,
794 				 &mapping->private_list);
795 			bh->b_assoc_map = mapping;
796 		}
797 		spin_unlock(lock);
798 		wait_on_buffer(bh);
799 		if (!buffer_uptodate(bh))
800 			err = -EIO;
801 		brelse(bh);
802 		spin_lock(lock);
803 	}
804 
805 	spin_unlock(lock);
806 	err2 = osync_buffers_list(lock, list);
807 	if (err)
808 		return err;
809 	else
810 		return err2;
811 }
812 
813 /*
814  * Invalidate any and all dirty buffers on a given inode.  We are
815  * probably unmounting the fs, but that doesn't mean we have already
816  * done a sync().  Just drop the buffers from the inode list.
817  *
818  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
819  * assumes that all the buffers are against the blockdev.  Not true
820  * for reiserfs.
821  */
822 void invalidate_inode_buffers(struct inode *inode)
823 {
824 	if (inode_has_buffers(inode)) {
825 		struct address_space *mapping = &inode->i_data;
826 		struct list_head *list = &mapping->private_list;
827 		struct address_space *buffer_mapping = mapping->assoc_mapping;
828 
829 		spin_lock(&buffer_mapping->private_lock);
830 		while (!list_empty(list))
831 			__remove_assoc_queue(BH_ENTRY(list->next));
832 		spin_unlock(&buffer_mapping->private_lock);
833 	}
834 }
835 EXPORT_SYMBOL(invalidate_inode_buffers);
836 
837 /*
838  * Remove any clean buffers from the inode's buffer list.  This is called
839  * when we're trying to free the inode itself.  Those buffers can pin it.
840  *
841  * Returns true if all buffers were removed.
842  */
843 int remove_inode_buffers(struct inode *inode)
844 {
845 	int ret = 1;
846 
847 	if (inode_has_buffers(inode)) {
848 		struct address_space *mapping = &inode->i_data;
849 		struct list_head *list = &mapping->private_list;
850 		struct address_space *buffer_mapping = mapping->assoc_mapping;
851 
852 		spin_lock(&buffer_mapping->private_lock);
853 		while (!list_empty(list)) {
854 			struct buffer_head *bh = BH_ENTRY(list->next);
855 			if (buffer_dirty(bh)) {
856 				ret = 0;
857 				break;
858 			}
859 			__remove_assoc_queue(bh);
860 		}
861 		spin_unlock(&buffer_mapping->private_lock);
862 	}
863 	return ret;
864 }
865 
866 /*
867  * Create the appropriate buffers when given a page for data area and
868  * the size of each buffer.. Use the bh->b_this_page linked list to
869  * follow the buffers created.  Return NULL if unable to create more
870  * buffers.
871  *
872  * The retry flag is used to differentiate async IO (paging, swapping),
873  * which may not fail, from ordinary buffer allocations.
874  */
875 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
876 		int retry)
877 {
878 	struct buffer_head *bh, *head;
879 	long offset;
880 
881 try_again:
882 	head = NULL;
883 	offset = PAGE_SIZE;
884 	while ((offset -= size) >= 0) {
885 		bh = alloc_buffer_head(GFP_NOFS);
886 		if (!bh)
887 			goto no_grow;
888 
889 		bh->b_bdev = NULL;
890 		bh->b_this_page = head;
891 		bh->b_blocknr = -1;
892 		head = bh;
893 
894 		bh->b_state = 0;
895 		atomic_set(&bh->b_count, 0);
896 		bh->b_size = size;
897 
898 		/* Link the buffer to its page */
899 		set_bh_page(bh, page, offset);
900 
901 		init_buffer(bh, NULL, NULL);
902 	}
903 	return head;
904 /*
905  * In case anything failed, we just free everything we got.
906  */
907 no_grow:
908 	if (head) {
909 		do {
910 			bh = head;
911 			head = head->b_this_page;
912 			free_buffer_head(bh);
913 		} while (head);
914 	}
915 
916 	/*
917 	 * Return failure for non-async IO requests.  Async IO requests
918 	 * are not allowed to fail, so we have to wait until buffer heads
919 	 * become available.  But we don't want tasks sleeping with
920 	 * partially complete buffers, so all were released above.
921 	 */
922 	if (!retry)
923 		return NULL;
924 
925 	/* We're _really_ low on memory. Now we just
926 	 * wait for old buffer heads to become free due to
927 	 * finishing IO.  Since this is an async request and
928 	 * the reserve list is empty, we're sure there are
929 	 * async buffer heads in use.
930 	 */
931 	free_more_memory();
932 	goto try_again;
933 }
934 EXPORT_SYMBOL_GPL(alloc_page_buffers);
935 
936 static inline void
937 link_dev_buffers(struct page *page, struct buffer_head *head)
938 {
939 	struct buffer_head *bh, *tail;
940 
941 	bh = head;
942 	do {
943 		tail = bh;
944 		bh = bh->b_this_page;
945 	} while (bh);
946 	tail->b_this_page = head;
947 	attach_page_buffers(page, head);
948 }
949 
950 /*
951  * Initialise the state of a blockdev page's buffers.
952  */
953 static void
954 init_page_buffers(struct page *page, struct block_device *bdev,
955 			sector_t block, int size)
956 {
957 	struct buffer_head *head = page_buffers(page);
958 	struct buffer_head *bh = head;
959 	int uptodate = PageUptodate(page);
960 
961 	do {
962 		if (!buffer_mapped(bh)) {
963 			init_buffer(bh, NULL, NULL);
964 			bh->b_bdev = bdev;
965 			bh->b_blocknr = block;
966 			if (uptodate)
967 				set_buffer_uptodate(bh);
968 			set_buffer_mapped(bh);
969 		}
970 		block++;
971 		bh = bh->b_this_page;
972 	} while (bh != head);
973 }
974 
975 /*
976  * Create the page-cache page that contains the requested block.
977  *
978  * This is used purely for blockdev mappings.
979  */
980 static struct page *
981 grow_dev_page(struct block_device *bdev, sector_t block,
982 		pgoff_t index, int size)
983 {
984 	struct inode *inode = bdev->bd_inode;
985 	struct page *page;
986 	struct buffer_head *bh;
987 
988 	page = find_or_create_page(inode->i_mapping, index,
989 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
990 	if (!page)
991 		return NULL;
992 
993 	BUG_ON(!PageLocked(page));
994 
995 	if (page_has_buffers(page)) {
996 		bh = page_buffers(page);
997 		if (bh->b_size == size) {
998 			init_page_buffers(page, bdev, block, size);
999 			return page;
1000 		}
1001 		if (!try_to_free_buffers(page))
1002 			goto failed;
1003 	}
1004 
1005 	/*
1006 	 * Allocate some buffers for this page
1007 	 */
1008 	bh = alloc_page_buffers(page, size, 0);
1009 	if (!bh)
1010 		goto failed;
1011 
1012 	/*
1013 	 * Link the page to the buffers and initialise them.  Take the
1014 	 * lock to be atomic wrt __find_get_block(), which does not
1015 	 * run under the page lock.
1016 	 */
1017 	spin_lock(&inode->i_mapping->private_lock);
1018 	link_dev_buffers(page, bh);
1019 	init_page_buffers(page, bdev, block, size);
1020 	spin_unlock(&inode->i_mapping->private_lock);
1021 	return page;
1022 
1023 failed:
1024 	BUG();
1025 	unlock_page(page);
1026 	page_cache_release(page);
1027 	return NULL;
1028 }
1029 
1030 /*
1031  * Create buffers for the specified block device block's page.  If
1032  * that page was dirty, the buffers are set dirty also.
1033  */
1034 static int
1035 grow_buffers(struct block_device *bdev, sector_t block, int size)
1036 {
1037 	struct page *page;
1038 	pgoff_t index;
1039 	int sizebits;
1040 
1041 	sizebits = -1;
1042 	do {
1043 		sizebits++;
1044 	} while ((size << sizebits) < PAGE_SIZE);
1045 
1046 	index = block >> sizebits;
1047 
1048 	/*
1049 	 * Check for a block which wants to lie outside our maximum possible
1050 	 * pagecache index.  (this comparison is done using sector_t types).
1051 	 */
1052 	if (unlikely(index != block >> sizebits)) {
1053 		char b[BDEVNAME_SIZE];
1054 
1055 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1056 			"device %s\n",
1057 			__func__, (unsigned long long)block,
1058 			bdevname(bdev, b));
1059 		return -EIO;
1060 	}
1061 	block = index << sizebits;
1062 	/* Create a page with the proper size buffers.. */
1063 	page = grow_dev_page(bdev, block, index, size);
1064 	if (!page)
1065 		return 0;
1066 	unlock_page(page);
1067 	page_cache_release(page);
1068 	return 1;
1069 }
1070 
1071 static struct buffer_head *
1072 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1073 {
1074 	/* Size must be multiple of hard sectorsize */
1075 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1076 			(size < 512 || size > PAGE_SIZE))) {
1077 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1078 					size);
1079 		printk(KERN_ERR "logical block size: %d\n",
1080 					bdev_logical_block_size(bdev));
1081 
1082 		dump_stack();
1083 		return NULL;
1084 	}
1085 
1086 	for (;;) {
1087 		struct buffer_head * bh;
1088 		int ret;
1089 
1090 		bh = __find_get_block(bdev, block, size);
1091 		if (bh)
1092 			return bh;
1093 
1094 		ret = grow_buffers(bdev, block, size);
1095 		if (ret < 0)
1096 			return NULL;
1097 		if (ret == 0)
1098 			free_more_memory();
1099 	}
1100 }
1101 
1102 /*
1103  * The relationship between dirty buffers and dirty pages:
1104  *
1105  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1106  * the page is tagged dirty in its radix tree.
1107  *
1108  * At all times, the dirtiness of the buffers represents the dirtiness of
1109  * subsections of the page.  If the page has buffers, the page dirty bit is
1110  * merely a hint about the true dirty state.
1111  *
1112  * When a page is set dirty in its entirety, all its buffers are marked dirty
1113  * (if the page has buffers).
1114  *
1115  * When a buffer is marked dirty, its page is dirtied, but the page's other
1116  * buffers are not.
1117  *
1118  * Also.  When blockdev buffers are explicitly read with bread(), they
1119  * individually become uptodate.  But their backing page remains not
1120  * uptodate - even if all of its buffers are uptodate.  A subsequent
1121  * block_read_full_page() against that page will discover all the uptodate
1122  * buffers, will set the page uptodate and will perform no I/O.
1123  */
1124 
1125 /**
1126  * mark_buffer_dirty - mark a buffer_head as needing writeout
1127  * @bh: the buffer_head to mark dirty
1128  *
1129  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1130  * backing page dirty, then tag the page as dirty in its address_space's radix
1131  * tree and then attach the address_space's inode to its superblock's dirty
1132  * inode list.
1133  *
1134  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1135  * mapping->tree_lock and the global inode_lock.
1136  */
1137 void mark_buffer_dirty(struct buffer_head *bh)
1138 {
1139 	WARN_ON_ONCE(!buffer_uptodate(bh));
1140 
1141 	/*
1142 	 * Very *carefully* optimize the it-is-already-dirty case.
1143 	 *
1144 	 * Don't let the final "is it dirty" escape to before we
1145 	 * perhaps modified the buffer.
1146 	 */
1147 	if (buffer_dirty(bh)) {
1148 		smp_mb();
1149 		if (buffer_dirty(bh))
1150 			return;
1151 	}
1152 
1153 	if (!test_set_buffer_dirty(bh)) {
1154 		struct page *page = bh->b_page;
1155 		if (!TestSetPageDirty(page)) {
1156 			struct address_space *mapping = page_mapping(page);
1157 			if (mapping)
1158 				__set_page_dirty(page, mapping, 0);
1159 		}
1160 	}
1161 }
1162 EXPORT_SYMBOL(mark_buffer_dirty);
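
/*
 * The usual calling pattern, sketched for a filesystem doing a
 * read-modify-write of an on-disk metadata block (blocknr, offset, data
 * and len stand for whatever the caller is updating):
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + offset, data, len);
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 *	brelse(bh);
 *
 * The actual write is issued later by writeback, or it can be forced out
 * with sync_dirty_buffer() when the caller needs the block on disk now.
 */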
1163 
1164 /*
1165  * Decrement a buffer_head's reference count.  If all buffers against a page
1166  * have zero reference count, are clean and unlocked, and if the page is clean
1167  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1168  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1169  * a page but it ends up not being freed, and buffers may later be reattached).
1170  */
1171 void __brelse(struct buffer_head * buf)
1172 {
1173 	if (atomic_read(&buf->b_count)) {
1174 		put_bh(buf);
1175 		return;
1176 	}
1177 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1178 }
1179 EXPORT_SYMBOL(__brelse);
1180 
1181 /*
1182  * bforget() is like brelse(), except it discards any
1183  * potentially dirty data.
1184  */
1185 void __bforget(struct buffer_head *bh)
1186 {
1187 	clear_buffer_dirty(bh);
1188 	if (bh->b_assoc_map) {
1189 		struct address_space *buffer_mapping = bh->b_page->mapping;
1190 
1191 		spin_lock(&buffer_mapping->private_lock);
1192 		list_del_init(&bh->b_assoc_buffers);
1193 		bh->b_assoc_map = NULL;
1194 		spin_unlock(&buffer_mapping->private_lock);
1195 	}
1196 	__brelse(bh);
1197 }
1198 EXPORT_SYMBOL(__bforget);
1199 
1200 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1201 {
1202 	lock_buffer(bh);
1203 	if (buffer_uptodate(bh)) {
1204 		unlock_buffer(bh);
1205 		return bh;
1206 	} else {
1207 		get_bh(bh);
1208 		bh->b_end_io = end_buffer_read_sync;
1209 		submit_bh(READ, bh);
1210 		wait_on_buffer(bh);
1211 		if (buffer_uptodate(bh))
1212 			return bh;
1213 	}
1214 	brelse(bh);
1215 	return NULL;
1216 }
1217 
1218 /*
1219  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
1220  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1221  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1222  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1223  * CPU's LRUs at the same time.
1224  *
1225  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1226  * sb_find_get_block().
1227  *
1228  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1229  * a local interrupt disable for that.
1230  */
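
/*
 * In other words, a (contrived) loop like the one below only pays the
 * full pagecache lookup cost on its first pass; later iterations are
 * satisfied straight from this CPU's LRU:
 *
 *	for (i = 0; i < 100; i++) {
 *		struct buffer_head *bh = __find_get_block(bdev, block, size);
 *
 *		if (bh)
 *			brelse(bh);
 *	}
 */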
1231 
1232 #define BH_LRU_SIZE	8
1233 
1234 struct bh_lru {
1235 	struct buffer_head *bhs[BH_LRU_SIZE];
1236 };
1237 
1238 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1239 
1240 #ifdef CONFIG_SMP
1241 #define bh_lru_lock()	local_irq_disable()
1242 #define bh_lru_unlock()	local_irq_enable()
1243 #else
1244 #define bh_lru_lock()	preempt_disable()
1245 #define bh_lru_unlock()	preempt_enable()
1246 #endif
1247 
1248 static inline void check_irqs_on(void)
1249 {
1250 #ifdef irqs_disabled
1251 	BUG_ON(irqs_disabled());
1252 #endif
1253 }
1254 
1255 /*
1256  * The LRU management algorithm is dopey-but-simple.  Sorry.
1257  */
1258 static void bh_lru_install(struct buffer_head *bh)
1259 {
1260 	struct buffer_head *evictee = NULL;
1261 
1262 	check_irqs_on();
1263 	bh_lru_lock();
1264 	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1265 		struct buffer_head *bhs[BH_LRU_SIZE];
1266 		int in;
1267 		int out = 0;
1268 
1269 		get_bh(bh);
1270 		bhs[out++] = bh;
1271 		for (in = 0; in < BH_LRU_SIZE; in++) {
1272 			struct buffer_head *bh2 =
1273 				__this_cpu_read(bh_lrus.bhs[in]);
1274 
1275 			if (bh2 == bh) {
1276 				__brelse(bh2);
1277 			} else {
1278 				if (out >= BH_LRU_SIZE) {
1279 					BUG_ON(evictee != NULL);
1280 					evictee = bh2;
1281 				} else {
1282 					bhs[out++] = bh2;
1283 				}
1284 			}
1285 		}
1286 		while (out < BH_LRU_SIZE)
1287 			bhs[out++] = NULL;
1288 		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1289 	}
1290 	bh_lru_unlock();
1291 
1292 	if (evictee)
1293 		__brelse(evictee);
1294 }
1295 
1296 /*
1297  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1298  */
1299 static struct buffer_head *
1300 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1301 {
1302 	struct buffer_head *ret = NULL;
1303 	unsigned int i;
1304 
1305 	check_irqs_on();
1306 	bh_lru_lock();
1307 	for (i = 0; i < BH_LRU_SIZE; i++) {
1308 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1309 
1310 		if (bh && bh->b_bdev == bdev &&
1311 				bh->b_blocknr == block && bh->b_size == size) {
1312 			if (i) {
1313 				while (i) {
1314 					__this_cpu_write(bh_lrus.bhs[i],
1315 						__this_cpu_read(bh_lrus.bhs[i - 1]));
1316 					i--;
1317 				}
1318 				__this_cpu_write(bh_lrus.bhs[0], bh);
1319 			}
1320 			get_bh(bh);
1321 			ret = bh;
1322 			break;
1323 		}
1324 	}
1325 	bh_lru_unlock();
1326 	return ret;
1327 }
1328 
1329 /*
1330  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1331  * it in the LRU and mark it as accessed.  If it is not present then return
1332  * NULL
1333  */
1334 struct buffer_head *
1335 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1336 {
1337 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1338 
1339 	if (bh == NULL) {
1340 		bh = __find_get_block_slow(bdev, block);
1341 		if (bh)
1342 			bh_lru_install(bh);
1343 	}
1344 	if (bh)
1345 		touch_buffer(bh);
1346 	return bh;
1347 }
1348 EXPORT_SYMBOL(__find_get_block);
1349 
1350 /*
1351  * __getblk will locate (and, if necessary, create) the buffer_head
1352  * which corresponds to the passed block_device, block and size. The
1353  * returned buffer has its reference count incremented.
1354  *
1355  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1356  * illegal block number, __getblk() will happily return a buffer_head
1357  * which represents the non-existent block.  Very weird.
1358  *
1359  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1360  * attempt is failing.  FIXME, perhaps?
1361  */
1362 struct buffer_head *
1363 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1364 {
1365 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1366 
1367 	might_sleep();
1368 	if (bh == NULL)
1369 		bh = __getblk_slow(bdev, block, size);
1370 	return bh;
1371 }
1372 EXPORT_SYMBOL(__getblk);
1373 
1374 /*
1375  * Do async read-ahead on a buffer..
1376  */
1377 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1378 {
1379 	struct buffer_head *bh = __getblk(bdev, block, size);
1380 	if (likely(bh)) {
1381 		ll_rw_block(READA, 1, &bh);
1382 		brelse(bh);
1383 	}
1384 }
1385 EXPORT_SYMBOL(__breadahead);
1386 
1387 /**
1388  *  __bread() - reads a specified block and returns the bh
1389  *  @bdev: the block_device to read from
1390  *  @block: number of block
1391  *  @size: size (in bytes) to read
1392  *
1393  *  Reads a specified block, and returns buffer head that contains it.
1394  *  It returns NULL if the block was unreadable.
1395  */
1396 struct buffer_head *
1397 __bread(struct block_device *bdev, sector_t block, unsigned size)
1398 {
1399 	struct buffer_head *bh = __getblk(bdev, block, size);
1400 
1401 	if (likely(bh) && !buffer_uptodate(bh))
1402 		bh = __bread_slow(bh);
1403 	return bh;
1404 }
1405 EXPORT_SYMBOL(__bread);
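
/*
 * Typical use - for instance a filesystem reading its superblock at
 * mount time - looks roughly like:
 *
 *	struct buffer_head *bh = __bread(bdev, EXAMPLE_SB_BLOCK, blocksize);
 *
 *	if (!bh)
 *		return -EIO;
 *	... interpret bh->b_data ...
 *	brelse(bh);
 *
 * (EXAMPLE_SB_BLOCK is a placeholder for whatever block the filesystem
 * keeps its superblock in; most callers use the sb_bread() wrapper.)
 */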
1406 
1407 /*
1408  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1409  * This doesn't race because it runs in each cpu either in irq
1410  * or with preempt disabled.
1411  */
1412 static void invalidate_bh_lru(void *arg)
1413 {
1414 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1415 	int i;
1416 
1417 	for (i = 0; i < BH_LRU_SIZE; i++) {
1418 		brelse(b->bhs[i]);
1419 		b->bhs[i] = NULL;
1420 	}
1421 	put_cpu_var(bh_lrus);
1422 }
1423 
1424 void invalidate_bh_lrus(void)
1425 {
1426 	on_each_cpu(invalidate_bh_lru, NULL, 1);
1427 }
1428 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1429 
1430 void set_bh_page(struct buffer_head *bh,
1431 		struct page *page, unsigned long offset)
1432 {
1433 	bh->b_page = page;
1434 	BUG_ON(offset >= PAGE_SIZE);
1435 	if (PageHighMem(page))
1436 		/*
1437 		 * This catches illegal uses and preserves the offset:
1438 		 */
1439 		bh->b_data = (char *)(0 + offset);
1440 	else
1441 		bh->b_data = page_address(page) + offset;
1442 }
1443 EXPORT_SYMBOL(set_bh_page);
1444 
1445 /*
1446  * Called when truncating a buffer on a page completely.
1447  */
1448 static void discard_buffer(struct buffer_head * bh)
1449 {
1450 	lock_buffer(bh);
1451 	clear_buffer_dirty(bh);
1452 	bh->b_bdev = NULL;
1453 	clear_buffer_mapped(bh);
1454 	clear_buffer_req(bh);
1455 	clear_buffer_new(bh);
1456 	clear_buffer_delay(bh);
1457 	clear_buffer_unwritten(bh);
1458 	unlock_buffer(bh);
1459 }
1460 
1461 /**
1462  * block_invalidatepage - invalidate part or all of a buffer-backed page
1463  *
1464  * @page: the page which is affected
1465  * @offset: the index of the truncation point
1466  *
1467  * block_invalidatepage() is called when all or part of the page has become
1468  * invalidated by a truncate operation.
1469  *
1470  * block_invalidatepage() does not have to release all buffers, but it must
1471  * ensure that no dirty buffer is left outside @offset and that no I/O
1472  * is underway against any of the blocks which are outside the truncation
1473  * point, because the caller is about to free (and possibly reuse) those
1474  * blocks on-disk.
1475  */
1476 void block_invalidatepage(struct page *page, unsigned long offset)
1477 {
1478 	struct buffer_head *head, *bh, *next;
1479 	unsigned int curr_off = 0;
1480 
1481 	BUG_ON(!PageLocked(page));
1482 	if (!page_has_buffers(page))
1483 		goto out;
1484 
1485 	head = page_buffers(page);
1486 	bh = head;
1487 	do {
1488 		unsigned int next_off = curr_off + bh->b_size;
1489 		next = bh->b_this_page;
1490 
1491 		/*
1492 		 * is this block fully invalidated?
1493 		 */
1494 		if (offset <= curr_off)
1495 			discard_buffer(bh);
1496 		curr_off = next_off;
1497 		bh = next;
1498 	} while (bh != head);
1499 
1500 	/*
1501 	 * We release buffers only if the entire page is being invalidated.
1502 	 * The get_block cached value has been unconditionally invalidated,
1503 	 * so real IO is not possible anymore.
1504 	 */
1505 	if (offset == 0)
1506 		try_to_release_page(page, 0);
1507 out:
1508 	return;
1509 }
1510 EXPORT_SYMBOL(block_invalidatepage);
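
/*
 * do_invalidatepage() falls back to this function when a mapping has no
 * ->invalidatepage method, and a buffer-backed filesystem may also point
 * its address_space_operations at it explicitly, roughly:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * (the example_* entries are placeholders; only the .invalidatepage
 * hook matters for this illustration.)
 */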
1511 
1512 /*
1513  * We attach and possibly dirty the buffers atomically wrt
1514  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1515  * is already excluded via the page lock.
1516  */
1517 void create_empty_buffers(struct page *page,
1518 			unsigned long blocksize, unsigned long b_state)
1519 {
1520 	struct buffer_head *bh, *head, *tail;
1521 
1522 	head = alloc_page_buffers(page, blocksize, 1);
1523 	bh = head;
1524 	do {
1525 		bh->b_state |= b_state;
1526 		tail = bh;
1527 		bh = bh->b_this_page;
1528 	} while (bh);
1529 	tail->b_this_page = head;
1530 
1531 	spin_lock(&page->mapping->private_lock);
1532 	if (PageUptodate(page) || PageDirty(page)) {
1533 		bh = head;
1534 		do {
1535 			if (PageDirty(page))
1536 				set_buffer_dirty(bh);
1537 			if (PageUptodate(page))
1538 				set_buffer_uptodate(bh);
1539 			bh = bh->b_this_page;
1540 		} while (bh != head);
1541 	}
1542 	attach_page_buffers(page, head);
1543 	spin_unlock(&page->mapping->private_lock);
1544 }
1545 EXPORT_SYMBOL(create_empty_buffers);
1546 
1547 /*
1548  * We are taking a block for data and we don't want any output from any
1549  * buffer-cache aliases from the moment this function returns until
1550  * something explicitly marks the buffer dirty (hopefully that will
1551  * not happen until we free that block ;-)
1552  * We don't even need to mark it not-uptodate - nobody can expect
1553  * anything from a newly allocated buffer anyway. We used to use
1554  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1555  * don't want to mark the alias unmapped, for example - it would confuse
1556  * anyone who might pick it with bread() afterwards...
1557  *
1558  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1559  * be writeout I/O going on against recently-freed buffers.  We don't
1560  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1561  * only if we really need to.  That happens here.
1562  */
1563 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1564 {
1565 	struct buffer_head *old_bh;
1566 
1567 	might_sleep();
1568 
1569 	old_bh = __find_get_block_slow(bdev, block);
1570 	if (old_bh) {
1571 		clear_buffer_dirty(old_bh);
1572 		wait_on_buffer(old_bh);
1573 		clear_buffer_req(old_bh);
1574 		__brelse(old_bh);
1575 	}
1576 }
1577 EXPORT_SYMBOL(unmap_underlying_metadata);
1578 
1579 /*
1580  * NOTE! All mapped/uptodate combinations are valid:
1581  *
1582  *	Mapped	Uptodate	Meaning
1583  *
1584  *	No	No		"unknown" - must do get_block()
1585  *	No	Yes		"hole" - zero-filled
1586  *	Yes	No		"allocated" - allocated on disk, not read in
1587  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1588  *
1589  * "Dirty" is valid only with the last case (mapped+uptodate).
1590  */
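
/*
 * A read path interprets the table above roughly like this (a simplified
 * sketch of what block_read_full_page() does for each buffer):
 *
 *	if (!buffer_mapped(bh)) {
 *		err = get_block(inode, iblock, bh, 0);
 *		if (!err && !buffer_mapped(bh)) {
 *			zero_user(page, bh_offset(bh), bh->b_size);
 *			set_buffer_uptodate(bh);	(it was a hole)
 *		}
 *	}
 *	if (buffer_mapped(bh) && !buffer_uptodate(bh))
 *		... submit the buffer for READ ...
 */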
1591 
1592 /*
1593  * While block_write_full_page is writing back the dirty buffers under
1594  * the page lock, whoever dirtied the buffers may decide to clean them
1595  * again at any time.  We handle that by only looking at the buffer
1596  * state inside lock_buffer().
1597  *
1598  * If block_write_full_page() is called for regular writeback
1599  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1600  * locked buffer.  This can only happen if someone has written the buffer
1601  * directly, with submit_bh().  At the address_space level PageWriteback
1602  * prevents this contention from occurring.
1603  *
1604  * If block_write_full_page() is called with wbc->sync_mode ==
1605  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1606  * causes the writes to be flagged as synchronous writes.
1607  */
1608 static int __block_write_full_page(struct inode *inode, struct page *page,
1609 			get_block_t *get_block, struct writeback_control *wbc,
1610 			bh_end_io_t *handler)
1611 {
1612 	int err;
1613 	sector_t block;
1614 	sector_t last_block;
1615 	struct buffer_head *bh, *head;
1616 	const unsigned blocksize = 1 << inode->i_blkbits;
1617 	int nr_underway = 0;
1618 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1619 			WRITE_SYNC : WRITE);
1620 
1621 	BUG_ON(!PageLocked(page));
1622 
1623 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1624 
1625 	if (!page_has_buffers(page)) {
1626 		create_empty_buffers(page, blocksize,
1627 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1628 	}
1629 
1630 	/*
1631 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1632 	 * here, and the (potentially unmapped) buffers may become dirty at
1633 	 * any time.  If a buffer becomes dirty here after we've inspected it
1634 	 * then we just miss that fact, and the page stays dirty.
1635 	 *
1636 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1637 	 * handle that here by just cleaning them.
1638 	 */
1639 
1640 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1641 	head = page_buffers(page);
1642 	bh = head;
1643 
1644 	/*
1645 	 * Get all the dirty buffers mapped to disk addresses and
1646 	 * handle any aliases from the underlying blockdev's mapping.
1647 	 */
1648 	do {
1649 		if (block > last_block) {
1650 			/*
1651 			 * mapped buffers outside i_size will occur, because
1652 			 * this page can be outside i_size when there is a
1653 			 * truncate in progress.
1654 			 */
1655 			/*
1656 			 * The buffer was zeroed by block_write_full_page()
1657 			 */
1658 			clear_buffer_dirty(bh);
1659 			set_buffer_uptodate(bh);
1660 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1661 			   buffer_dirty(bh)) {
1662 			WARN_ON(bh->b_size != blocksize);
1663 			err = get_block(inode, block, bh, 1);
1664 			if (err)
1665 				goto recover;
1666 			clear_buffer_delay(bh);
1667 			if (buffer_new(bh)) {
1668 				/* blockdev mappings never come here */
1669 				clear_buffer_new(bh);
1670 				unmap_underlying_metadata(bh->b_bdev,
1671 							bh->b_blocknr);
1672 			}
1673 		}
1674 		bh = bh->b_this_page;
1675 		block++;
1676 	} while (bh != head);
1677 
1678 	do {
1679 		if (!buffer_mapped(bh))
1680 			continue;
1681 		/*
1682 		 * If it's a fully non-blocking write attempt and we cannot
1683 		 * lock the buffer then redirty the page.  Note that this can
1684 		 * potentially cause a busy-wait loop from writeback threads
1685 		 * and kswapd activity, but those code paths have their own
1686 		 * higher-level throttling.
1687 		 */
1688 		if (wbc->sync_mode != WB_SYNC_NONE) {
1689 			lock_buffer(bh);
1690 		} else if (!trylock_buffer(bh)) {
1691 			redirty_page_for_writepage(wbc, page);
1692 			continue;
1693 		}
1694 		if (test_clear_buffer_dirty(bh)) {
1695 			mark_buffer_async_write_endio(bh, handler);
1696 		} else {
1697 			unlock_buffer(bh);
1698 		}
1699 	} while ((bh = bh->b_this_page) != head);
1700 
1701 	/*
1702 	 * The page and its buffers are protected by PageWriteback(), so we can
1703 	 * drop the bh refcounts early.
1704 	 */
1705 	BUG_ON(PageWriteback(page));
1706 	set_page_writeback(page);
1707 
1708 	do {
1709 		struct buffer_head *next = bh->b_this_page;
1710 		if (buffer_async_write(bh)) {
1711 			submit_bh(write_op, bh);
1712 			nr_underway++;
1713 		}
1714 		bh = next;
1715 	} while (bh != head);
1716 	unlock_page(page);
1717 
1718 	err = 0;
1719 done:
1720 	if (nr_underway == 0) {
1721 		/*
1722 		 * The page was marked dirty, but the buffers were
1723 		 * clean.  Someone wrote them back by hand with
1724 		 * ll_rw_block/submit_bh.  A rare case.
1725 		 */
1726 		end_page_writeback(page);
1727 
1728 		/*
1729 		 * The page and buffer_heads can be released at any time from
1730 		 * here on.
1731 		 */
1732 	}
1733 	return err;
1734 
1735 recover:
1736 	/*
1737 	 * ENOSPC, or some other error.  We may already have added some
1738 	 * blocks to the file, so we need to write these out to avoid
1739 	 * exposing stale data.
1740 	 * The page is currently locked and not marked for writeback
1741 	 */
1742 	bh = head;
1743 	/* Recovery: lock and submit the mapped buffers */
1744 	do {
1745 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1746 		    !buffer_delay(bh)) {
1747 			lock_buffer(bh);
1748 			mark_buffer_async_write_endio(bh, handler);
1749 		} else {
1750 			/*
1751 			 * The buffer may have been set dirty during
1752 			 * attachment to a dirty page.
1753 			 */
1754 			clear_buffer_dirty(bh);
1755 		}
1756 	} while ((bh = bh->b_this_page) != head);
1757 	SetPageError(page);
1758 	BUG_ON(PageWriteback(page));
1759 	mapping_set_error(page->mapping, err);
1760 	set_page_writeback(page);
1761 	do {
1762 		struct buffer_head *next = bh->b_this_page;
1763 		if (buffer_async_write(bh)) {
1764 			clear_buffer_dirty(bh);
1765 			submit_bh(write_op, bh);
1766 			nr_underway++;
1767 		}
1768 		bh = next;
1769 	} while (bh != head);
1770 	unlock_page(page);
1771 	goto done;
1772 }
1773 
1774 /*
1775  * If a page has any new buffers, zero them out here, and mark them uptodate
1776  * and dirty so they'll be written out (in order to prevent uninitialised
1777  * block data from leaking). And clear the new bit.
1778  */
1779 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1780 {
1781 	unsigned int block_start, block_end;
1782 	struct buffer_head *head, *bh;
1783 
1784 	BUG_ON(!PageLocked(page));
1785 	if (!page_has_buffers(page))
1786 		return;
1787 
1788 	bh = head = page_buffers(page);
1789 	block_start = 0;
1790 	do {
1791 		block_end = block_start + bh->b_size;
1792 
1793 		if (buffer_new(bh)) {
1794 			if (block_end > from && block_start < to) {
1795 				if (!PageUptodate(page)) {
1796 					unsigned start, size;
1797 
1798 					start = max(from, block_start);
1799 					size = min(to, block_end) - start;
1800 
1801 					zero_user(page, start, size);
1802 					set_buffer_uptodate(bh);
1803 				}
1804 
1805 				clear_buffer_new(bh);
1806 				mark_buffer_dirty(bh);
1807 			}
1808 		}
1809 
1810 		block_start = block_end;
1811 		bh = bh->b_this_page;
1812 	} while (bh != head);
1813 }
1814 EXPORT_SYMBOL(page_zero_new_buffers);
1815 
1816 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1817 		get_block_t *get_block)
1818 {
1819 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1820 	unsigned to = from + len;
1821 	struct inode *inode = page->mapping->host;
1822 	unsigned block_start, block_end;
1823 	sector_t block;
1824 	int err = 0;
1825 	unsigned blocksize, bbits;
1826 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1827 
1828 	BUG_ON(!PageLocked(page));
1829 	BUG_ON(from > PAGE_CACHE_SIZE);
1830 	BUG_ON(to > PAGE_CACHE_SIZE);
1831 	BUG_ON(from > to);
1832 
1833 	blocksize = 1 << inode->i_blkbits;
1834 	if (!page_has_buffers(page))
1835 		create_empty_buffers(page, blocksize, 0);
1836 	head = page_buffers(page);
1837 
1838 	bbits = inode->i_blkbits;
1839 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1840 
1841 	for(bh = head, block_start = 0; bh != head || !block_start;
1842 	    block++, block_start=block_end, bh = bh->b_this_page) {
1843 		block_end = block_start + blocksize;
1844 		if (block_end <= from || block_start >= to) {
1845 			if (PageUptodate(page)) {
1846 				if (!buffer_uptodate(bh))
1847 					set_buffer_uptodate(bh);
1848 			}
1849 			continue;
1850 		}
1851 		if (buffer_new(bh))
1852 			clear_buffer_new(bh);
1853 		if (!buffer_mapped(bh)) {
1854 			WARN_ON(bh->b_size != blocksize);
1855 			err = get_block(inode, block, bh, 1);
1856 			if (err)
1857 				break;
1858 			if (buffer_new(bh)) {
1859 				unmap_underlying_metadata(bh->b_bdev,
1860 							bh->b_blocknr);
1861 				if (PageUptodate(page)) {
1862 					clear_buffer_new(bh);
1863 					set_buffer_uptodate(bh);
1864 					mark_buffer_dirty(bh);
1865 					continue;
1866 				}
1867 				if (block_end > to || block_start < from)
1868 					zero_user_segments(page,
1869 						to, block_end,
1870 						block_start, from);
1871 				continue;
1872 			}
1873 		}
1874 		if (PageUptodate(page)) {
1875 			if (!buffer_uptodate(bh))
1876 				set_buffer_uptodate(bh);
1877 			continue;
1878 		}
1879 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1880 		    !buffer_unwritten(bh) &&
1881 		     (block_start < from || block_end > to)) {
1882 			ll_rw_block(READ, 1, &bh);
1883 			*wait_bh++=bh;
1884 		}
1885 	}
1886 	/*
1887 	 * If we issued read requests - let them complete.
1888 	 */
1889 	while(wait_bh > wait) {
1890 		wait_on_buffer(*--wait_bh);
1891 		if (!buffer_uptodate(*wait_bh))
1892 			err = -EIO;
1893 	}
1894 	if (unlikely(err)) {
1895 		page_zero_new_buffers(page, from, to);
1896 		ClearPageUptodate(page);
1897 	}
1898 	return err;
1899 }
1900 EXPORT_SYMBOL(__block_write_begin);
1901 
1902 static int __block_commit_write(struct inode *inode, struct page *page,
1903 		unsigned from, unsigned to)
1904 {
1905 	unsigned block_start, block_end;
1906 	int partial = 0;
1907 	unsigned blocksize;
1908 	struct buffer_head *bh, *head;
1909 
1910 	blocksize = 1 << inode->i_blkbits;
1911 
1912 	for(bh = head = page_buffers(page), block_start = 0;
1913 	    bh != head || !block_start;
1914 	    block_start=block_end, bh = bh->b_this_page) {
1915 		block_end = block_start + blocksize;
1916 		if (block_end <= from || block_start >= to) {
1917 			if (!buffer_uptodate(bh))
1918 				partial = 1;
1919 		} else {
1920 			set_buffer_uptodate(bh);
1921 			mark_buffer_dirty(bh);
1922 		}
1923 		clear_buffer_new(bh);
1924 	}
1925 
1926 	/*
1927 	 * If this is a partial write which happened to make all buffers
1928 	 * uptodate then we can optimize away a bogus readpage() for
1929 	 * the next read(). Here we 'discover' whether the page went
1930 	 * uptodate as a result of this (potentially partial) write.
1931 	 */
1932 	if (!partial)
1933 		SetPageUptodate(page);
1934 	return 0;
1935 }
1936 
1937 /*
1938  * block_write_begin takes care of the basic task of block allocation and
1939  * bringing partial write blocks uptodate first.
1940  *
1941  * The filesystem needs to handle block truncation upon failure.
1942  */
1943 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1944 		unsigned flags, struct page **pagep, get_block_t *get_block)
1945 {
1946 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1947 	struct page *page;
1948 	int status;
1949 
1950 	page = grab_cache_page_write_begin(mapping, index, flags);
1951 	if (!page)
1952 		return -ENOMEM;
1953 
1954 	status = __block_write_begin(page, pos, len, get_block);
1955 	if (unlikely(status)) {
1956 		unlock_page(page);
1957 		page_cache_release(page);
1958 		page = NULL;
1959 	}
1960 
1961 	*pagep = page;
1962 	return status;
1963 }
1964 EXPORT_SYMBOL(block_write_begin);
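/*
 * A minimal sketch of a filesystem ->write_begin built on the helper above.
 * myfs_get_block and myfs_write_failed are hypothetical: the first is the
 * filesystem's block-mapping callback, the second undoes any blocks that
 * were instantiated beyond i_size when the write cannot proceed.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				myfs_get_block);
	if (ret < 0)
		myfs_write_failed(mapping, pos + len);	/* hypothetical cleanup */
	return ret;
}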
1965 
1966 int block_write_end(struct file *file, struct address_space *mapping,
1967 			loff_t pos, unsigned len, unsigned copied,
1968 			struct page *page, void *fsdata)
1969 {
1970 	struct inode *inode = mapping->host;
1971 	unsigned start;
1972 
1973 	start = pos & (PAGE_CACHE_SIZE - 1);
1974 
1975 	if (unlikely(copied < len)) {
1976 		/*
1977 		 * The buffers that were written will now be uptodate, so we
1978 		 * don't have to worry about a readpage reading them and
1979 		 * overwriting a partial write. However if we have encountered
1980 		 * a short write and only partially written into a buffer, it
1981 		 * will not be marked uptodate, so a readpage might come in and
1982 		 * destroy our partial write.
1983 		 *
1984 		 * Do the simplest thing, and just treat any short write to a
1985 		 * non uptodate page as a zero-length write, and force the
1986 		 * caller to redo the whole thing.
1987 		 */
1988 		if (!PageUptodate(page))
1989 			copied = 0;
1990 
1991 		page_zero_new_buffers(page, start+copied, start+len);
1992 	}
1993 	flush_dcache_page(page);
1994 
1995 	/* This could be a short (even 0-length) commit */
1996 	__block_commit_write(inode, page, start, start+copied);
1997 
1998 	return copied;
1999 }
2000 EXPORT_SYMBOL(block_write_end);
2001 
2002 int generic_write_end(struct file *file, struct address_space *mapping,
2003 			loff_t pos, unsigned len, unsigned copied,
2004 			struct page *page, void *fsdata)
2005 {
2006 	struct inode *inode = mapping->host;
2007 	int i_size_changed = 0;
2008 
2009 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2010 
2011 	/*
2012 	 * No need to use i_size_read() here, the i_size
2013 	 * cannot change under us because we hold i_mutex.
2014 	 *
2015 	 * But it's important to update i_size while still holding page lock:
2016 	 * page writeout could otherwise come in and zero beyond i_size.
2017 	 */
2018 	if (pos+copied > inode->i_size) {
2019 		i_size_write(inode, pos+copied);
2020 		i_size_changed = 1;
2021 	}
2022 
2023 	unlock_page(page);
2024 	page_cache_release(page);
2025 
2026 	/*
2027 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2028 	 * makes the holding time of page lock longer. Second, it forces lock
2029 	 * ordering of page lock and transaction start for journaling
2030 	 * filesystems.
2031 	 */
2032 	if (i_size_changed)
2033 		mark_inode_dirty(inode);
2034 
2035 	return copied;
2036 }
2037 EXPORT_SYMBOL(generic_write_end);
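/*
 * Sketch of the matching ->write_end for the example above: most
 * filesystems can delegate to generic_write_end() directly, and only need
 * extra work (here the hypothetical myfs_write_failed) when a short copy
 * leaves allocated blocks beyond the new i_size.
 */
static int myfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		myfs_write_failed(mapping, pos + len);	/* hypothetical cleanup */
	return ret;
}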
2038 
2039 /*
2040  * block_is_partially_uptodate checks whether buffers within a page are
2041  * uptodate or not.
2042  *
2043  * Returns true if all buffers which correspond to a file portion
2044  * we want to read are uptodate.
2045  */
2046 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2047 					unsigned long from)
2048 {
2049 	struct inode *inode = page->mapping->host;
2050 	unsigned block_start, block_end, blocksize;
2051 	unsigned to;
2052 	struct buffer_head *bh, *head;
2053 	int ret = 1;
2054 
2055 	if (!page_has_buffers(page))
2056 		return 0;
2057 
2058 	blocksize = 1 << inode->i_blkbits;
2059 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2060 	to = from + to;
2061 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2062 		return 0;
2063 
2064 	head = page_buffers(page);
2065 	bh = head;
2066 	block_start = 0;
2067 	do {
2068 		block_end = block_start + blocksize;
2069 		if (block_end > from && block_start < to) {
2070 			if (!buffer_uptodate(bh)) {
2071 				ret = 0;
2072 				break;
2073 			}
2074 			if (block_end >= to)
2075 				break;
2076 		}
2077 		block_start = block_end;
2078 		bh = bh->b_this_page;
2079 	} while (bh != head);
2080 
2081 	return ret;
2082 }
2083 EXPORT_SYMBOL(block_is_partially_uptodate);
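/*
 * Sketch of how a buffer-backed filesystem typically wires these helpers
 * into its address_space_operations; all myfs_* names are hypothetical.
 */
static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,
	.writepage		= myfs_writepage,
	.write_begin		= myfs_write_begin,
	.write_end		= myfs_write_end,
	.bmap			= myfs_bmap,
	.is_partially_uptodate	= block_is_partially_uptodate,
};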
2084 
2085 /*
2086  * Generic "read page" function for block devices that have the normal
2087  * get_block functionality. This covers most block-device filesystems.
2088  * Reads the page asynchronously --- the unlock_buffer() and
2089  * set/clear_buffer_uptodate() functions propagate buffer state into the
2090  * page struct once IO has completed.
2091  */
2092 int block_read_full_page(struct page *page, get_block_t *get_block)
2093 {
2094 	struct inode *inode = page->mapping->host;
2095 	sector_t iblock, lblock;
2096 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2097 	unsigned int blocksize;
2098 	int nr, i;
2099 	int fully_mapped = 1;
2100 
2101 	BUG_ON(!PageLocked(page));
2102 	blocksize = 1 << inode->i_blkbits;
2103 	if (!page_has_buffers(page))
2104 		create_empty_buffers(page, blocksize, 0);
2105 	head = page_buffers(page);
2106 
2107 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2108 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2109 	bh = head;
2110 	nr = 0;
2111 	i = 0;
2112 
2113 	do {
2114 		if (buffer_uptodate(bh))
2115 			continue;
2116 
2117 		if (!buffer_mapped(bh)) {
2118 			int err = 0;
2119 
2120 			fully_mapped = 0;
2121 			if (iblock < lblock) {
2122 				WARN_ON(bh->b_size != blocksize);
2123 				err = get_block(inode, iblock, bh, 0);
2124 				if (err)
2125 					SetPageError(page);
2126 			}
2127 			if (!buffer_mapped(bh)) {
2128 				zero_user(page, i * blocksize, blocksize);
2129 				if (!err)
2130 					set_buffer_uptodate(bh);
2131 				continue;
2132 			}
2133 			/*
2134 			 * get_block() might have updated the buffer
2135 			 * synchronously
2136 			 */
2137 			if (buffer_uptodate(bh))
2138 				continue;
2139 		}
2140 		arr[nr++] = bh;
2141 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2142 
2143 	if (fully_mapped)
2144 		SetPageMappedToDisk(page);
2145 
2146 	if (!nr) {
2147 		/*
2148 		 * All buffers are uptodate - we can set the page uptodate
2149 		 * as well. But not if get_block() returned an error.
2150 		 */
2151 		if (!PageError(page))
2152 			SetPageUptodate(page);
2153 		unlock_page(page);
2154 		return 0;
2155 	}
2156 
2157 	/* Stage two: lock the buffers */
2158 	for (i = 0; i < nr; i++) {
2159 		bh = arr[i];
2160 		lock_buffer(bh);
2161 		mark_buffer_async_read(bh);
2162 	}
2163 
2164 	/*
2165 	 * Stage 3: start the IO.  Check for uptodateness
2166 	 * inside the buffer lock in case another process reading
2167 	 * the underlying blockdev brought it uptodate (the sct fix).
2168 	 */
2169 	for (i = 0; i < nr; i++) {
2170 		bh = arr[i];
2171 		if (buffer_uptodate(bh))
2172 			end_buffer_async_read(bh, 1);
2173 		else
2174 			submit_bh(READ, bh);
2175 	}
2176 	return 0;
2177 }
2178 EXPORT_SYMBOL(block_read_full_page);
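/*
 * Sketch: with a get_block callback, a filesystem's ->readpage is usually
 * a one-line wrapper (myfs_get_block is hypothetical).
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}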
2179 
2180 /* utility function for filesystems that need to do work on expanding
2181  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2182  * deal with the hole.
2183  */
2184 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2185 {
2186 	struct address_space *mapping = inode->i_mapping;
2187 	struct page *page;
2188 	void *fsdata;
2189 	int err;
2190 
2191 	err = inode_newsize_ok(inode, size);
2192 	if (err)
2193 		goto out;
2194 
2195 	err = pagecache_write_begin(NULL, mapping, size, 0,
2196 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2197 				&page, &fsdata);
2198 	if (err)
2199 		goto out;
2200 
2201 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2202 	BUG_ON(err > 0);
2203 
2204 out:
2205 	return err;
2206 }
2207 EXPORT_SYMBOL(generic_cont_expand_simple);
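/*
 * Sketch of an expanding-truncate path using the helper above; shrinking
 * and on-disk block release are omitted.  All myfs_* names are hypothetical.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		error = generic_cont_expand_simple(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}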
2208 
2209 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2210 			    loff_t pos, loff_t *bytes)
2211 {
2212 	struct inode *inode = mapping->host;
2213 	unsigned blocksize = 1 << inode->i_blkbits;
2214 	struct page *page;
2215 	void *fsdata;
2216 	pgoff_t index, curidx;
2217 	loff_t curpos;
2218 	unsigned zerofrom, offset, len;
2219 	int err = 0;
2220 
2221 	index = pos >> PAGE_CACHE_SHIFT;
2222 	offset = pos & ~PAGE_CACHE_MASK;
2223 
2224 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2225 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2226 		if (zerofrom & (blocksize-1)) {
2227 			*bytes |= (blocksize-1);
2228 			(*bytes)++;
2229 		}
2230 		len = PAGE_CACHE_SIZE - zerofrom;
2231 
2232 		err = pagecache_write_begin(file, mapping, curpos, len,
2233 						AOP_FLAG_UNINTERRUPTIBLE,
2234 						&page, &fsdata);
2235 		if (err)
2236 			goto out;
2237 		zero_user(page, zerofrom, len);
2238 		err = pagecache_write_end(file, mapping, curpos, len, len,
2239 						page, fsdata);
2240 		if (err < 0)
2241 			goto out;
2242 		BUG_ON(err != len);
2243 		err = 0;
2244 
2245 		balance_dirty_pages_ratelimited(mapping);
2246 	}
2247 
2248 	/* page covers the boundary, find the boundary offset */
2249 	if (index == curidx) {
2250 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2251 		/* if we will expand the file, the last block will be filled */
2252 		if (offset <= zerofrom) {
2253 			goto out;
2254 		}
2255 		if (zerofrom & (blocksize-1)) {
2256 			*bytes |= (blocksize-1);
2257 			(*bytes)++;
2258 		}
2259 		len = offset - zerofrom;
2260 
2261 		err = pagecache_write_begin(file, mapping, curpos, len,
2262 						AOP_FLAG_UNINTERRUPTIBLE,
2263 						&page, &fsdata);
2264 		if (err)
2265 			goto out;
2266 		zero_user(page, zerofrom, len);
2267 		err = pagecache_write_end(file, mapping, curpos, len, len,
2268 						page, fsdata);
2269 		if (err < 0)
2270 			goto out;
2271 		BUG_ON(err != len);
2272 		err = 0;
2273 	}
2274 out:
2275 	return err;
2276 }
2277 
2278 /*
2279  * For moronic filesystems that do not allow holes in files.
2280  * We may have to extend the file.
2281  */
2282 int cont_write_begin(struct file *file, struct address_space *mapping,
2283 			loff_t pos, unsigned len, unsigned flags,
2284 			struct page **pagep, void **fsdata,
2285 			get_block_t *get_block, loff_t *bytes)
2286 {
2287 	struct inode *inode = mapping->host;
2288 	unsigned blocksize = 1 << inode->i_blkbits;
2289 	unsigned zerofrom;
2290 	int err;
2291 
2292 	err = cont_expand_zero(file, mapping, pos, bytes);
2293 	if (err)
2294 		return err;
2295 
2296 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2297 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2298 		*bytes |= (blocksize-1);
2299 		(*bytes)++;
2300 	}
2301 
2302 	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2303 }
2304 EXPORT_SYMBOL(cont_write_begin);
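/*
 * Sketch of a ->write_begin for a hole-less filesystem: cont_write_begin()
 * first zero-fills from the current allocation boundary up to @pos.  The
 * per-inode "allocated so far" counter (here myfs_i(inode)->mmu_private)
 * and myfs_get_block are hypothetical.
 */
static int myfs_cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;

	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block, &myfs_i(inode)->mmu_private);
}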
2305 
2306 int block_commit_write(struct page *page, unsigned from, unsigned to)
2307 {
2308 	struct inode *inode = page->mapping->host;
2309 	__block_commit_write(inode,page,from,to);
2310 	return 0;
2311 }
2312 EXPORT_SYMBOL(block_commit_write);
2313 
2314 /*
2315  * block_page_mkwrite() is not allowed to change the file size as it gets
2316  * called from a page fault handler when a page is first dirtied. Hence we must
2317  * be careful to check for EOF conditions here. We set the page up correctly
2318  * for a written page which means we get ENOSPC checking when writing into
2319  * holes and correct delalloc and unwritten extent mapping on filesystems that
2320  * support these features.
2321  *
2322  * We are not allowed to take the i_mutex here so we have to play games to
2323  * protect against truncate races as the page could now be beyond EOF.  Because
2324  * truncate writes the inode size before removing pages, once we have the
2325  * page lock we can determine safely if the page is beyond EOF. If it is not
2326  * beyond EOF, then the page is guaranteed safe against truncation until we
2327  * unlock the page.
2328  */
2329 int
2330 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2331 		   get_block_t get_block)
2332 {
2333 	struct page *page = vmf->page;
2334 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2335 	unsigned long end;
2336 	loff_t size;
2337 	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2338 
2339 	lock_page(page);
2340 	size = i_size_read(inode);
2341 	if ((page->mapping != inode->i_mapping) ||
2342 	    (page_offset(page) > size)) {
2343 		/* page got truncated out from underneath us */
2344 		unlock_page(page);
2345 		goto out;
2346 	}
2347 
2348 	/* page is wholly or partially inside EOF */
2349 	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) > size)
2350 		end = size & ~PAGE_CACHE_MASK;
2351 	else
2352 		end = PAGE_CACHE_SIZE;
2353 
2354 	ret = __block_write_begin(page, 0, end, get_block);
2355 	if (!ret)
2356 		ret = block_commit_write(page, 0, end);
2357 
2358 	if (unlikely(ret)) {
2359 		unlock_page(page);
2360 		if (ret == -ENOMEM)
2361 			ret = VM_FAULT_OOM;
2362 		else /* -ENOSPC, -EIO, etc */
2363 			ret = VM_FAULT_SIGBUS;
2364 	} else
2365 		ret = VM_FAULT_LOCKED;
2366 
2367 out:
2368 	return ret;
2369 }
2370 EXPORT_SYMBOL(block_page_mkwrite);
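/*
 * Sketch: hooking block_page_mkwrite() up as a file's ->page_mkwrite so
 * that the first write through a shared mapping allocates blocks (and can
 * return ENOSPC) just like write(2).  myfs_get_block is hypothetical.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};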
2371 
2372 /*
2373  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2374  * immediately, while under the page lock.  So it needs a special end_io
2375  * handler which does not touch the bh after unlocking it.
2376  */
2377 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2378 {
2379 	__end_buffer_read_notouch(bh, uptodate);
2380 }
2381 
2382 /*
2383  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2384  * the page (converting it to circular linked list and taking care of page
2385  * dirty races).
2386  */
2387 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2388 {
2389 	struct buffer_head *bh;
2390 
2391 	BUG_ON(!PageLocked(page));
2392 
2393 	spin_lock(&page->mapping->private_lock);
2394 	bh = head;
2395 	do {
2396 		if (PageDirty(page))
2397 			set_buffer_dirty(bh);
2398 		if (!bh->b_this_page)
2399 			bh->b_this_page = head;
2400 		bh = bh->b_this_page;
2401 	} while (bh != head);
2402 	attach_page_buffers(page, head);
2403 	spin_unlock(&page->mapping->private_lock);
2404 }
2405 
2406 /*
2407  * On entry, the page is fully not uptodate.
2408  * On exit, the page is fully uptodate in the areas outside (from,to).
2409  * The filesystem needs to handle block truncation upon failure.
2410  */
2411 int nobh_write_begin(struct address_space *mapping,
2412 			loff_t pos, unsigned len, unsigned flags,
2413 			struct page **pagep, void **fsdata,
2414 			get_block_t *get_block)
2415 {
2416 	struct inode *inode = mapping->host;
2417 	const unsigned blkbits = inode->i_blkbits;
2418 	const unsigned blocksize = 1 << blkbits;
2419 	struct buffer_head *head, *bh;
2420 	struct page *page;
2421 	pgoff_t index;
2422 	unsigned from, to;
2423 	unsigned block_in_page;
2424 	unsigned block_start, block_end;
2425 	sector_t block_in_file;
2426 	int nr_reads = 0;
2427 	int ret = 0;
2428 	int is_mapped_to_disk = 1;
2429 
2430 	index = pos >> PAGE_CACHE_SHIFT;
2431 	from = pos & (PAGE_CACHE_SIZE - 1);
2432 	to = from + len;
2433 
2434 	page = grab_cache_page_write_begin(mapping, index, flags);
2435 	if (!page)
2436 		return -ENOMEM;
2437 	*pagep = page;
2438 	*fsdata = NULL;
2439 
2440 	if (page_has_buffers(page)) {
2441 		ret = __block_write_begin(page, pos, len, get_block);
2442 		if (unlikely(ret))
2443 			goto out_release;
2444 		return ret;
2445 	}
2446 
2447 	if (PageMappedToDisk(page))
2448 		return 0;
2449 
2450 	/*
2451 	 * Allocate buffers so that we can keep track of state, and potentially
2452 	 * attach them to the page if an error occurs. In the common case of
2453 	 * no error, they will just be freed again without ever being attached
2454 	 * to the page (which is all OK, because we're under the page lock).
2455 	 *
2456 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2457 	 * than the circular one we're used to.
2458 	 */
2459 	head = alloc_page_buffers(page, blocksize, 0);
2460 	if (!head) {
2461 		ret = -ENOMEM;
2462 		goto out_release;
2463 	}
2464 
2465 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2466 
2467 	/*
2468 	 * We loop across all blocks in the page, whether or not they are
2469 	 * part of the affected region.  This is so we can discover if the
2470 	 * page is fully mapped-to-disk.
2471 	 */
2472 	for (block_start = 0, block_in_page = 0, bh = head;
2473 		  block_start < PAGE_CACHE_SIZE;
2474 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2475 		int create;
2476 
2477 		block_end = block_start + blocksize;
2478 		bh->b_state = 0;
2479 		create = 1;
2480 		if (block_start >= to)
2481 			create = 0;
2482 		ret = get_block(inode, block_in_file + block_in_page,
2483 					bh, create);
2484 		if (ret)
2485 			goto failed;
2486 		if (!buffer_mapped(bh))
2487 			is_mapped_to_disk = 0;
2488 		if (buffer_new(bh))
2489 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2490 		if (PageUptodate(page)) {
2491 			set_buffer_uptodate(bh);
2492 			continue;
2493 		}
2494 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2495 			zero_user_segments(page, block_start, from,
2496 							to, block_end);
2497 			continue;
2498 		}
2499 		if (buffer_uptodate(bh))
2500 			continue;	/* reiserfs does this */
2501 		if (block_start < from || block_end > to) {
2502 			lock_buffer(bh);
2503 			bh->b_end_io = end_buffer_read_nobh;
2504 			submit_bh(READ, bh);
2505 			nr_reads++;
2506 		}
2507 	}
2508 
2509 	if (nr_reads) {
2510 		/*
2511 		 * The page is locked, so these buffers are protected from
2512 		 * any VM or truncate activity.  Hence we don't need to care
2513 		 * for the buffer_head refcounts.
2514 		 */
2515 		for (bh = head; bh; bh = bh->b_this_page) {
2516 			wait_on_buffer(bh);
2517 			if (!buffer_uptodate(bh))
2518 				ret = -EIO;
2519 		}
2520 		if (ret)
2521 			goto failed;
2522 	}
2523 
2524 	if (is_mapped_to_disk)
2525 		SetPageMappedToDisk(page);
2526 
2527 	*fsdata = head; /* to be released by nobh_write_end */
2528 
2529 	return 0;
2530 
2531 failed:
2532 	BUG_ON(!ret);
2533 	/*
2534 	 * Error recovery is a bit difficult. We need to zero out blocks that
2535 	 * were newly allocated, and dirty them to ensure they get written out.
2536 	 * Buffers need to be attached to the page at this point, otherwise
2537 	 * the handling of potential IO errors during writeout would be hard
2538 	 * (could try doing synchronous writeout, but what if that fails too?)
2539 	 */
2540 	attach_nobh_buffers(page, head);
2541 	page_zero_new_buffers(page, from, to);
2542 
2543 out_release:
2544 	unlock_page(page);
2545 	page_cache_release(page);
2546 	*pagep = NULL;
2547 
2548 	return ret;
2549 }
2550 EXPORT_SYMBOL(nobh_write_begin);
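/*
 * Sketch of the nobh variant of ->write_begin; the matching ->write_end
 * must then be nobh_write_end() so the buffer list passed back via fsdata
 * is consumed.  myfs_get_block is hypothetical.
 */
static int myfs_nobh_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}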
2551 
2552 int nobh_write_end(struct file *file, struct address_space *mapping,
2553 			loff_t pos, unsigned len, unsigned copied,
2554 			struct page *page, void *fsdata)
2555 {
2556 	struct inode *inode = page->mapping->host;
2557 	struct buffer_head *head = fsdata;
2558 	struct buffer_head *bh;
2559 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2560 
2561 	if (unlikely(copied < len) && head)
2562 		attach_nobh_buffers(page, head);
2563 	if (page_has_buffers(page))
2564 		return generic_write_end(file, mapping, pos, len,
2565 					copied, page, fsdata);
2566 
2567 	SetPageUptodate(page);
2568 	set_page_dirty(page);
2569 	if (pos+copied > inode->i_size) {
2570 		i_size_write(inode, pos+copied);
2571 		mark_inode_dirty(inode);
2572 	}
2573 
2574 	unlock_page(page);
2575 	page_cache_release(page);
2576 
2577 	while (head) {
2578 		bh = head;
2579 		head = head->b_this_page;
2580 		free_buffer_head(bh);
2581 	}
2582 
2583 	return copied;
2584 }
2585 EXPORT_SYMBOL(nobh_write_end);
2586 
2587 /*
2588  * nobh_writepage() - based on block_write_full_page() except
2589  * that it tries to operate without attaching bufferheads to
2590  * the page.
2591  */
2592 int nobh_writepage(struct page *page, get_block_t *get_block,
2593 			struct writeback_control *wbc)
2594 {
2595 	struct inode * const inode = page->mapping->host;
2596 	loff_t i_size = i_size_read(inode);
2597 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2598 	unsigned offset;
2599 	int ret;
2600 
2601 	/* Is the page fully inside i_size? */
2602 	if (page->index < end_index)
2603 		goto out;
2604 
2605 	/* Is the page fully outside i_size? (truncate in progress) */
2606 	offset = i_size & (PAGE_CACHE_SIZE-1);
2607 	if (page->index >= end_index+1 || !offset) {
2608 		/*
2609 		 * The page may have dirty, unmapped buffers.  For example,
2610 		 * they may have been added in ext3_writepage().  Make them
2611 		 * freeable here, so the page does not leak.
2612 		 */
2613 #if 0
2614 		/* Not really sure about this  - do we need this ? */
2615 		if (page->mapping->a_ops->invalidatepage)
2616 			page->mapping->a_ops->invalidatepage(page, offset);
2617 #endif
2618 		unlock_page(page);
2619 		return 0; /* don't care */
2620 	}
2621 
2622 	/*
2623 	 * The page straddles i_size.  It must be zeroed out on each and every
2624 	 * writepage invocation because it may be mmapped.  "A file is mapped
2625 	 * in multiples of the page size.  For a file that is not a multiple of
2626 	 * the  page size, the remaining memory is zeroed when mapped, and
2627 	 * writes to that region are not written out to the file."
2628 	 */
2629 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2630 out:
2631 	ret = mpage_writepage(page, get_block, wbc);
2632 	if (ret == -EAGAIN)
2633 		ret = __block_write_full_page(inode, page, get_block, wbc,
2634 					      end_buffer_async_write);
2635 	return ret;
2636 }
2637 EXPORT_SYMBOL(nobh_writepage);
2638 
2639 int nobh_truncate_page(struct address_space *mapping,
2640 			loff_t from, get_block_t *get_block)
2641 {
2642 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2643 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2644 	unsigned blocksize;
2645 	sector_t iblock;
2646 	unsigned length, pos;
2647 	struct inode *inode = mapping->host;
2648 	struct page *page;
2649 	struct buffer_head map_bh;
2650 	int err;
2651 
2652 	blocksize = 1 << inode->i_blkbits;
2653 	length = offset & (blocksize - 1);
2654 
2655 	/* Block boundary? Nothing to do */
2656 	if (!length)
2657 		return 0;
2658 
2659 	length = blocksize - length;
2660 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2661 
2662 	page = grab_cache_page(mapping, index);
2663 	err = -ENOMEM;
2664 	if (!page)
2665 		goto out;
2666 
2667 	if (page_has_buffers(page)) {
2668 has_buffers:
2669 		unlock_page(page);
2670 		page_cache_release(page);
2671 		return block_truncate_page(mapping, from, get_block);
2672 	}
2673 
2674 	/* Find the buffer that contains "offset" */
2675 	pos = blocksize;
2676 	while (offset >= pos) {
2677 		iblock++;
2678 		pos += blocksize;
2679 	}
2680 
2681 	map_bh.b_size = blocksize;
2682 	map_bh.b_state = 0;
2683 	err = get_block(inode, iblock, &map_bh, 0);
2684 	if (err)
2685 		goto unlock;
2686 	/* unmapped? It's a hole - nothing to do */
2687 	if (!buffer_mapped(&map_bh))
2688 		goto unlock;
2689 
2690 	/* Ok, it's mapped. Make sure it's up-to-date */
2691 	if (!PageUptodate(page)) {
2692 		err = mapping->a_ops->readpage(NULL, page);
2693 		if (err) {
2694 			page_cache_release(page);
2695 			goto out;
2696 		}
2697 		lock_page(page);
2698 		if (!PageUptodate(page)) {
2699 			err = -EIO;
2700 			goto unlock;
2701 		}
2702 		if (page_has_buffers(page))
2703 			goto has_buffers;
2704 	}
2705 	zero_user(page, offset, length);
2706 	set_page_dirty(page);
2707 	err = 0;
2708 
2709 unlock:
2710 	unlock_page(page);
2711 	page_cache_release(page);
2712 out:
2713 	return err;
2714 }
2715 EXPORT_SYMBOL(nobh_truncate_page);
2716 
2717 int block_truncate_page(struct address_space *mapping,
2718 			loff_t from, get_block_t *get_block)
2719 {
2720 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2721 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2722 	unsigned blocksize;
2723 	sector_t iblock;
2724 	unsigned length, pos;
2725 	struct inode *inode = mapping->host;
2726 	struct page *page;
2727 	struct buffer_head *bh;
2728 	int err;
2729 
2730 	blocksize = 1 << inode->i_blkbits;
2731 	length = offset & (blocksize - 1);
2732 
2733 	/* Block boundary? Nothing to do */
2734 	if (!length)
2735 		return 0;
2736 
2737 	length = blocksize - length;
2738 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2739 
2740 	page = grab_cache_page(mapping, index);
2741 	err = -ENOMEM;
2742 	if (!page)
2743 		goto out;
2744 
2745 	if (!page_has_buffers(page))
2746 		create_empty_buffers(page, blocksize, 0);
2747 
2748 	/* Find the buffer that contains "offset" */
2749 	bh = page_buffers(page);
2750 	pos = blocksize;
2751 	while (offset >= pos) {
2752 		bh = bh->b_this_page;
2753 		iblock++;
2754 		pos += blocksize;
2755 	}
2756 
2757 	err = 0;
2758 	if (!buffer_mapped(bh)) {
2759 		WARN_ON(bh->b_size != blocksize);
2760 		err = get_block(inode, iblock, bh, 0);
2761 		if (err)
2762 			goto unlock;
2763 		/* unmapped? It's a hole - nothing to do */
2764 		if (!buffer_mapped(bh))
2765 			goto unlock;
2766 	}
2767 
2768 	/* Ok, it's mapped. Make sure it's up-to-date */
2769 	if (PageUptodate(page))
2770 		set_buffer_uptodate(bh);
2771 
2772 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2773 		err = -EIO;
2774 		ll_rw_block(READ, 1, &bh);
2775 		wait_on_buffer(bh);
2776 		/* Uhhuh. Read error. Complain and punt. */
2777 		if (!buffer_uptodate(bh))
2778 			goto unlock;
2779 	}
2780 
2781 	zero_user(page, offset, length);
2782 	mark_buffer_dirty(bh);
2783 	err = 0;
2784 
2785 unlock:
2786 	unlock_page(page);
2787 	page_cache_release(page);
2788 out:
2789 	return err;
2790 }
2791 EXPORT_SYMBOL(block_truncate_page);
2792 
2793 /*
2794  * The generic ->writepage function for buffer-backed address_spaces
2795  * this form passes in the end_io handler used to finish the IO.
2796  */
2797 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2798 			struct writeback_control *wbc, bh_end_io_t *handler)
2799 {
2800 	struct inode * const inode = page->mapping->host;
2801 	loff_t i_size = i_size_read(inode);
2802 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2803 	unsigned offset;
2804 
2805 	/* Is the page fully inside i_size? */
2806 	if (page->index < end_index)
2807 		return __block_write_full_page(inode, page, get_block, wbc,
2808 					       handler);
2809 
2810 	/* Is the page fully outside i_size? (truncate in progress) */
2811 	offset = i_size & (PAGE_CACHE_SIZE-1);
2812 	if (page->index >= end_index+1 || !offset) {
2813 		/*
2814 		 * The page may have dirty, unmapped buffers.  For example,
2815 		 * they may have been added in ext3_writepage().  Make them
2816 		 * freeable here, so the page does not leak.
2817 		 */
2818 		do_invalidatepage(page, 0);
2819 		unlock_page(page);
2820 		return 0; /* don't care */
2821 	}
2822 
2823 	/*
2824 	 * The page straddles i_size.  It must be zeroed out on each and every
2825 	 * writepage invocation because it may be mmapped.  "A file is mapped
2826 	 * in multiples of the page size.  For a file that is not a multiple of
2827 	 * the  page size, the remaining memory is zeroed when mapped, and
2828 	 * writes to that region are not written out to the file."
2829 	 */
2830 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2831 	return __block_write_full_page(inode, page, get_block, wbc, handler);
2832 }
2833 EXPORT_SYMBOL(block_write_full_page_endio);
2834 
2835 /*
2836  * The generic ->writepage function for buffer-backed address_spaces
2837  */
2838 int block_write_full_page(struct page *page, get_block_t *get_block,
2839 			struct writeback_control *wbc)
2840 {
2841 	return block_write_full_page_endio(page, get_block, wbc,
2842 					   end_buffer_async_write);
2843 }
2844 EXPORT_SYMBOL(block_write_full_page);
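/*
 * Sketch: the usual ->writepage for a buffer-backed filesystem is again a
 * thin wrapper (myfs_get_block is hypothetical).
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}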
2845 
2846 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2847 			    get_block_t *get_block)
2848 {
2849 	struct buffer_head tmp;
2850 	struct inode *inode = mapping->host;
2851 	tmp.b_state = 0;
2852 	tmp.b_blocknr = 0;
2853 	tmp.b_size = 1 << inode->i_blkbits;
2854 	get_block(inode, block, &tmp, 0);
2855 	return tmp.b_blocknr;
2856 }
2857 EXPORT_SYMBOL(generic_block_bmap);
2858 
2859 static void end_bio_bh_io_sync(struct bio *bio, int err)
2860 {
2861 	struct buffer_head *bh = bio->bi_private;
2862 
2863 	if (err == -EOPNOTSUPP) {
2864 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2865 	}
2866 
2867 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2868 		set_bit(BH_Quiet, &bh->b_state);
2869 
2870 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2871 	bio_put(bio);
2872 }
2873 
2874 int submit_bh(int rw, struct buffer_head * bh)
2875 {
2876 	struct bio *bio;
2877 	int ret = 0;
2878 
2879 	BUG_ON(!buffer_locked(bh));
2880 	BUG_ON(!buffer_mapped(bh));
2881 	BUG_ON(!bh->b_end_io);
2882 	BUG_ON(buffer_delay(bh));
2883 	BUG_ON(buffer_unwritten(bh));
2884 
2885 	/*
2886 	 * Only clear out a write error when rewriting
2887 	 */
2888 	if (test_set_buffer_req(bh) && (rw & WRITE))
2889 		clear_buffer_write_io_error(bh);
2890 
2891 	/*
2892 	 * from here on down, it's all bio -- do the initial mapping,
2893 	 * submit_bio -> generic_make_request may further map this bio around
2894 	 */
2895 	bio = bio_alloc(GFP_NOIO, 1);
2896 
2897 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2898 	bio->bi_bdev = bh->b_bdev;
2899 	bio->bi_io_vec[0].bv_page = bh->b_page;
2900 	bio->bi_io_vec[0].bv_len = bh->b_size;
2901 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2902 
2903 	bio->bi_vcnt = 1;
2904 	bio->bi_idx = 0;
2905 	bio->bi_size = bh->b_size;
2906 
2907 	bio->bi_end_io = end_bio_bh_io_sync;
2908 	bio->bi_private = bh;
2909 
2910 	bio_get(bio);
2911 	submit_bio(rw, bio);
2912 
2913 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2914 		ret = -EOPNOTSUPP;
2915 
2916 	bio_put(bio);
2917 	return ret;
2918 }
2919 EXPORT_SYMBOL(submit_bh);
2920 
2921 /**
2922  * ll_rw_block: low-level access to block devices (DEPRECATED)
2923  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2924  * @nr: number of &struct buffer_heads in the array
2925  * @bhs: array of pointers to &struct buffer_head
2926  *
2927  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2928  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2929  * %READA option is described in the documentation for generic_make_request()
2930  * which ll_rw_block() calls.
2931  *
2932  * This function drops any buffer that it cannot get a lock on (with the
2933  * BH_Lock state bit), any buffer that appears to be clean when doing a write
2934  * request, and any buffer that appears to be up-to-date when doing read
2935  * request.  Further it marks as clean buffers that are processed for
2936  * writing (the buffer cache won't assume that they are actually clean
2937  * until the buffer gets unlocked).
2938  *
2939  * ll_rw_block sets b_end_io to a simple completion handler that marks
2940  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2941  * any waiters.
2942  *
2943  * All of the buffers must be for the same device, and must also be a
2944  * multiple of the current approved size for the device.
2945  */
2946 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2947 {
2948 	int i;
2949 
2950 	for (i = 0; i < nr; i++) {
2951 		struct buffer_head *bh = bhs[i];
2952 
2953 		if (!trylock_buffer(bh))
2954 			continue;
2955 		if (rw == WRITE) {
2956 			if (test_clear_buffer_dirty(bh)) {
2957 				bh->b_end_io = end_buffer_write_sync;
2958 				get_bh(bh);
2959 				submit_bh(WRITE, bh);
2960 				continue;
2961 			}
2962 		} else {
2963 			if (!buffer_uptodate(bh)) {
2964 				bh->b_end_io = end_buffer_read_sync;
2965 				get_bh(bh);
2966 				submit_bh(rw, bh);
2967 				continue;
2968 			}
2969 		}
2970 		unlock_buffer(bh);
2971 	}
2972 }
2973 EXPORT_SYMBOL(ll_rw_block);
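/*
 * Sketch of the classic (now discouraged) ll_rw_block() pattern: kick off
 * reads on a batch of already-mapped buffers, then wait for and check each
 * one.
 */
static int myfs_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}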
2974 
2975 void write_dirty_buffer(struct buffer_head *bh, int rw)
2976 {
2977 	lock_buffer(bh);
2978 	if (!test_clear_buffer_dirty(bh)) {
2979 		unlock_buffer(bh);
2980 		return;
2981 	}
2982 	bh->b_end_io = end_buffer_write_sync;
2983 	get_bh(bh);
2984 	submit_bh(rw, bh);
2985 }
2986 EXPORT_SYMBOL(write_dirty_buffer);
2987 
2988 /*
2989  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2990  * and then start new I/O and then wait upon it.  The caller must have a ref on
2991  * the buffer_head.
2992  */
2993 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
2994 {
2995 	int ret = 0;
2996 
2997 	WARN_ON(atomic_read(&bh->b_count) < 1);
2998 	lock_buffer(bh);
2999 	if (test_clear_buffer_dirty(bh)) {
3000 		get_bh(bh);
3001 		bh->b_end_io = end_buffer_write_sync;
3002 		ret = submit_bh(rw, bh);
3003 		wait_on_buffer(bh);
3004 		if (!ret && !buffer_uptodate(bh))
3005 			ret = -EIO;
3006 	} else {
3007 		unlock_buffer(bh);
3008 	}
3009 	return ret;
3010 }
3011 EXPORT_SYMBOL(__sync_dirty_buffer);
3012 
3013 int sync_dirty_buffer(struct buffer_head *bh)
3014 {
3015 	return __sync_dirty_buffer(bh, WRITE_SYNC);
3016 }
3017 EXPORT_SYMBOL(sync_dirty_buffer);
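/*
 * Sketch of a data-integrity metadata update: dirty the buffer, then use
 * sync_dirty_buffer() to write it and wait.  myfs_sb() and s_sbh are
 * hypothetical per-superblock state.
 */
static int myfs_sync_super(struct super_block *sb)
{
	struct buffer_head *bh = myfs_sb(sb)->s_sbh;	/* hypothetical */

	/* ... update the on-disk superblock image in bh->b_data ... */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* 0 on success, -EIO on write error */
}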
3018 
3019 /*
3020  * try_to_free_buffers() checks if all the buffers on this particular page
3021  * are unused, and releases them if so.
3022  *
3023  * Exclusion against try_to_free_buffers may be obtained by either
3024  * locking the page or by holding its mapping's private_lock.
3025  *
3026  * If the page is dirty but all the buffers are clean then we need to
3027  * be sure to mark the page clean as well.  This is because the page
3028  * may be against a block device, and a later reattachment of buffers
3029  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3030  * filesystem data on the same device.
3031  *
3032  * The same applies to regular filesystem pages: if all the buffers are
3033  * clean then we set the page clean and proceed.  To do that, we require
3034  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3035  * private_lock.
3036  *
3037  * try_to_free_buffers() is non-blocking.
3038  */
3039 static inline int buffer_busy(struct buffer_head *bh)
3040 {
3041 	return atomic_read(&bh->b_count) |
3042 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3043 }
3044 
3045 static int
3046 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3047 {
3048 	struct buffer_head *head = page_buffers(page);
3049 	struct buffer_head *bh;
3050 
3051 	bh = head;
3052 	do {
3053 		if (buffer_write_io_error(bh) && page->mapping)
3054 			set_bit(AS_EIO, &page->mapping->flags);
3055 		if (buffer_busy(bh))
3056 			goto failed;
3057 		bh = bh->b_this_page;
3058 	} while (bh != head);
3059 
3060 	do {
3061 		struct buffer_head *next = bh->b_this_page;
3062 
3063 		if (bh->b_assoc_map)
3064 			__remove_assoc_queue(bh);
3065 		bh = next;
3066 	} while (bh != head);
3067 	*buffers_to_free = head;
3068 	__clear_page_buffers(page);
3069 	return 1;
3070 failed:
3071 	return 0;
3072 }
3073 
3074 int try_to_free_buffers(struct page *page)
3075 {
3076 	struct address_space * const mapping = page->mapping;
3077 	struct buffer_head *buffers_to_free = NULL;
3078 	int ret = 0;
3079 
3080 	BUG_ON(!PageLocked(page));
3081 	if (PageWriteback(page))
3082 		return 0;
3083 
3084 	if (mapping == NULL) {		/* can this still happen? */
3085 		ret = drop_buffers(page, &buffers_to_free);
3086 		goto out;
3087 	}
3088 
3089 	spin_lock(&mapping->private_lock);
3090 	ret = drop_buffers(page, &buffers_to_free);
3091 
3092 	/*
3093 	 * If the filesystem writes its buffers by hand (eg ext3)
3094 	 * then we can have clean buffers against a dirty page.  We
3095 	 * clean the page here; otherwise the VM will never notice
3096 	 * that the filesystem did any IO at all.
3097 	 *
3098 	 * Also, during truncate, discard_buffer will have marked all
3099 	 * the page's buffers clean.  We discover that here and clean
3100 	 * the page also.
3101 	 *
3102 	 * private_lock must be held over this entire operation in order
3103 	 * to synchronise against __set_page_dirty_buffers and prevent the
3104 	 * dirty bit from being lost.
3105 	 */
3106 	if (ret)
3107 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3108 	spin_unlock(&mapping->private_lock);
3109 out:
3110 	if (buffers_to_free) {
3111 		struct buffer_head *bh = buffers_to_free;
3112 
3113 		do {
3114 			struct buffer_head *next = bh->b_this_page;
3115 			free_buffer_head(bh);
3116 			bh = next;
3117 		} while (bh != buffers_to_free);
3118 	}
3119 	return ret;
3120 }
3121 EXPORT_SYMBOL(try_to_free_buffers);
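/*
 * Sketch: a filesystem's ->releasepage usually just defers to
 * try_to_free_buffers(), refusing only when it still pins the buffers
 * itself (myfs_page_pinned is a hypothetical check, e.g. for journaling).
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (myfs_page_pinned(page))
		return 0;
	return try_to_free_buffers(page);
}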
3122 
3123 /*
3124  * There are no bdflush tunables left.  But distributions are
3125  * still running obsolete flush daemons, so we terminate them here.
3126  *
3127  * Use of bdflush() is deprecated and will be removed in a future kernel.
3128  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3129  */
3130 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3131 {
3132 	static int msg_count;
3133 
3134 	if (!capable(CAP_SYS_ADMIN))
3135 		return -EPERM;
3136 
3137 	if (msg_count < 5) {
3138 		msg_count++;
3139 		printk(KERN_INFO
3140 			"warning: process `%s' used the obsolete bdflush"
3141 			" system call\n", current->comm);
3142 		printk(KERN_INFO "Fix your initscripts?\n");
3143 	}
3144 
3145 	if (func == 1)
3146 		do_exit(0);
3147 	return 0;
3148 }
3149 
3150 /*
3151  * Buffer-head allocation
3152  */
3153 static struct kmem_cache *bh_cachep;
3154 
3155 /*
3156  * Once the number of bh's in the machine exceeds this level, we start
3157  * stripping them in writeback.
3158  */
3159 static int max_buffer_heads;
3160 
3161 int buffer_heads_over_limit;
3162 
3163 struct bh_accounting {
3164 	int nr;			/* Number of live bh's */
3165 	int ratelimit;		/* Limit cacheline bouncing */
3166 };
3167 
3168 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3169 
3170 static void recalc_bh_state(void)
3171 {
3172 	int i;
3173 	int tot = 0;
3174 
3175 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3176 		return;
3177 	__this_cpu_write(bh_accounting.ratelimit, 0);
3178 	for_each_online_cpu(i)
3179 		tot += per_cpu(bh_accounting, i).nr;
3180 	buffer_heads_over_limit = (tot > max_buffer_heads);
3181 }
3182 
3183 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3184 {
3185 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3186 	if (ret) {
3187 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3188 		preempt_disable();
3189 		__this_cpu_inc(bh_accounting.nr);
3190 		recalc_bh_state();
3191 		preempt_enable();
3192 	}
3193 	return ret;
3194 }
3195 EXPORT_SYMBOL(alloc_buffer_head);
3196 
3197 void free_buffer_head(struct buffer_head *bh)
3198 {
3199 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3200 	kmem_cache_free(bh_cachep, bh);
3201 	preempt_disable();
3202 	__this_cpu_dec(bh_accounting.nr);
3203 	recalc_bh_state();
3204 	preempt_enable();
3205 }
3206 EXPORT_SYMBOL(free_buffer_head);
3207 
3208 static void buffer_exit_cpu(int cpu)
3209 {
3210 	int i;
3211 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3212 
3213 	for (i = 0; i < BH_LRU_SIZE; i++) {
3214 		brelse(b->bhs[i]);
3215 		b->bhs[i] = NULL;
3216 	}
3217 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3218 	per_cpu(bh_accounting, cpu).nr = 0;
3219 }
3220 
3221 static int buffer_cpu_notify(struct notifier_block *self,
3222 			      unsigned long action, void *hcpu)
3223 {
3224 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3225 		buffer_exit_cpu((unsigned long)hcpu);
3226 	return NOTIFY_OK;
3227 }
3228 
3229 /**
3230  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3231  * @bh: struct buffer_head
3232  *
3233  * Returns true if the buffer is up-to-date; otherwise returns false
3234  * with the buffer locked.
3235  */
3236 int bh_uptodate_or_lock(struct buffer_head *bh)
3237 {
3238 	if (!buffer_uptodate(bh)) {
3239 		lock_buffer(bh);
3240 		if (!buffer_uptodate(bh))
3241 			return 0;
3242 		unlock_buffer(bh);
3243 	}
3244 	return 1;
3245 }
3246 EXPORT_SYMBOL(bh_uptodate_or_lock);
3247 
3248 /**
3249  * bh_submit_read - Submit a locked buffer for reading
3250  * @bh: struct buffer_head
3251  *
3252  * Returns zero on success and -EIO on error.
3253  */
3254 int bh_submit_read(struct buffer_head *bh)
3255 {
3256 	BUG_ON(!buffer_locked(bh));
3257 
3258 	if (buffer_uptodate(bh)) {
3259 		unlock_buffer(bh);
3260 		return 0;
3261 	}
3262 
3263 	get_bh(bh);
3264 	bh->b_end_io = end_buffer_read_sync;
3265 	submit_bh(READ, bh);
3266 	wait_on_buffer(bh);
3267 	if (buffer_uptodate(bh))
3268 		return 0;
3269 	return -EIO;
3270 }
3271 EXPORT_SYMBOL(bh_submit_read);
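/*
 * Sketch of the intended pairing of the two helpers above: skip the read
 * (and the lock) when the buffer is already uptodate, otherwise submit the
 * read on the now-locked buffer and wait for it.
 */
static int myfs_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, not locked */
	return bh_submit_read(bh);	/* bh is locked; unlocked on completion */
}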
3272 
3273 void __init buffer_init(void)
3274 {
3275 	int nrpages;
3276 
3277 	bh_cachep = kmem_cache_create("buffer_head",
3278 			sizeof(struct buffer_head), 0,
3279 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3280 				SLAB_MEM_SPREAD),
3281 				NULL);
3282 
3283 	/*
3284 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3285 	 */
3286 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3287 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3288 	hotcpu_notifier(buffer_cpu_notify, 0);
3289 }
3290