xref: /openbmc/linux/fs/jfs/jfs_metapage.c (revision 160b8e75)
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

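/*
 * Each metapage is serialized by the META_locked bit in mp->flag, which
 * is only manipulated with mp->page locked.  __lock_metapage() therefore
 * drops the page lock before sleeping: the current holder needs it in
 * order to finish up and call unlock_metapage().
 */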
static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

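/*
 * When PAGE_SIZE is larger than the 4K metapage size (PSIZE), several
 * metapages share a single page.  The meta_anchor hung off page_private()
 * tracks the metapage in each PSIZE slot along with a count of in-flight
 * bios, so that a read or write completion handler runs only when the
 * last bio against the page finishes.
 */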
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
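		/*
		 * A metapage spans 1 << (L2PSIZE - i_blkbits) fs blocks,
		 * so (to take one possible configuration, 64K pages with
		 * a 1K block size) the metapage at block mp->index would
		 * land in slot (mp->index >> 2) & 15 of its page.
		 */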
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
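/*
 * PAGE_SIZE == PSIZE: a single metapage per page, hung directly off
 * page_private().  No I/O counting is needed, since only one bio is
 * ever in flight against the page at a time.
 */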
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while (0)
#define dec_io(page, handler) handler(page)

#endif

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}
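
/*
 * The mempool keeps a reserve of METAPOOL_MIN_PAGES metapage structures
 * so allocation can always make forward progress: under memory pressure
 * mempool_alloc() waits for another metapage to be freed rather than
 * failing outright.
 */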

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

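/*
 * Map logical block lblock of the inode to a physical block number,
 * trimming *len to the length of the contiguous extent found.  The direct
 * (block device) inode has i_ino == 0 and maps 1:1; everything else goes
 * through xtLookup().  Returns 0 if the block can't be mapped.
 */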
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and
	 * after acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}

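/*
 * Write back the dirty metapages in a page.  Each PSIZE chunk is mapped
 * with metapage_get_blocks(), and chunks that are contiguous both in the
 * page and on disk are coalesced into a single bio.  Metapages pinned by
 * the journal (nohomeok) are skipped and the page is redirtied; if the
 * journal isn't already writing them out, a flush is kicked so the page
 * can't stay dirty indefinitely.
 */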
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}

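/*
 * Read a page of metadata.  Each run of blocks backed by a contiguous
 * on-disk extent gets its own bio; unmapped blocks are skipped one at a
 * time.  last_read_complete() unlocks the page when the final bio
 * finishes; if nothing could be mapped, the page is simply unlocked.
 */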
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio_set_dev(bio, inode->i_sb->s_bdev);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned int offset,
				    unsigned int length)
{
	BUG_ON(offset || length < PAGE_SIZE);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

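/*
 * Look up (or create) the metapage covering lblock.  With absolute set,
 * lblock addresses the raw volume through the direct inode; otherwise it
 * is relative to the given inode.  A nonzero new returns the metapage
 * zero-filled instead of reading it from disk.  Callers normally go
 * through the read_metapage()/get_metapage() wrappers in jfs_metapage.h;
 * a typical (purely illustrative) sequence looks like:
 *
 *	mp = read_metapage(ip, lblock, PSIZE, 0);
 *	if (mp == NULL)
 *		return -EIO;
 *	... modify mp->data ...
 *	write_metapage(mp);	(marks dirty, then releases)
 */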
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size  = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an NFS client tries to read an inode number beyond the
		 * last existing inode, we may try to read past the end of
		 * the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->page = page;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

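/*
 * Take an additional reference on a metapage that the caller already
 * holds, and acquire the metapage lock.
 */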
void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	get_page(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	if (write_one_page(page))
		jfs_error(mp->sb, "write_one_page() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	put_page(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	get_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

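/*
 * Drop a reference to a metapage.  On the final release, a dirty
 * metapage is handed to the page cache for writeback (written
 * synchronously if META_sync is set), a clean one is removed from the
 * logsync list, and drop_metapage() frees the structure when it is safe
 * to do so.
 */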
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		put_page(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (write_one_page(page))
				jfs_error(mp->sb, "write_one_page() failed\n");
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	put_page(page);
}

void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		put_page(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);
	return 0;
}

static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, jfs_mpstat_proc_show, NULL);
}

const struct file_operations jfs_mpstat_proc_fops = {
	.open		= jfs_mpstat_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif