1 /* 2 * Copyright (C) International Business Machines Corp., 2000-2005 3 * Portions Copyright (C) Christoph Hellwig, 2001-2002 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See 13 * the GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20 #include <linux/fs.h> 21 #include <linux/mm.h> 22 #include <linux/module.h> 23 #include <linux/bio.h> 24 #include <linux/init.h> 25 #include <linux/buffer_head.h> 26 #include <linux/mempool.h> 27 #include <linux/seq_file.h> 28 #include "jfs_incore.h" 29 #include "jfs_superblock.h" 30 #include "jfs_filsys.h" 31 #include "jfs_metapage.h" 32 #include "jfs_txnmgr.h" 33 #include "jfs_debug.h" 34 35 #ifdef CONFIG_JFS_STATISTICS 36 static struct { 37 uint pagealloc; /* # of page allocations */ 38 uint pagefree; /* # of page frees */ 39 uint lockwait; /* # of sleeping lock_metapage() calls */ 40 } mpStat; 41 #endif 42 43 #define metapage_locked(mp) test_bit(META_locked, &(mp)->flag) 44 #define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag) 45 46 static inline void unlock_metapage(struct metapage *mp) 47 { 48 clear_bit_unlock(META_locked, &mp->flag); 49 wake_up(&mp->wait); 50 } 51 52 static inline void __lock_metapage(struct metapage *mp) 53 { 54 DECLARE_WAITQUEUE(wait, current); 55 INCREMENT(mpStat.lockwait); 56 add_wait_queue_exclusive(&mp->wait, &wait); 57 do { 58 set_current_state(TASK_UNINTERRUPTIBLE); 59 if (metapage_locked(mp)) { 60 unlock_page(mp->page); 61 io_schedule(); 62 lock_page(mp->page); 63 } 64 } while (trylock_metapage(mp)); 65 __set_current_state(TASK_RUNNING); 66 remove_wait_queue(&mp->wait, &wait); 67 } 68 69 /* 70 * Must have mp->page locked 71 */ 72 static inline void lock_metapage(struct metapage *mp) 73 { 74 if (trylock_metapage(mp)) 75 __lock_metapage(mp); 76 } 77 78 #define METAPOOL_MIN_PAGES 32 79 static struct kmem_cache *metapage_cache; 80 static mempool_t *metapage_mempool; 81 82 #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) 83 84 #if MPS_PER_PAGE > 1 85 86 struct meta_anchor { 87 int mp_count; 88 atomic_t io_count; 89 struct metapage *mp[MPS_PER_PAGE]; 90 }; 91 #define mp_anchor(page) ((struct meta_anchor *)page_private(page)) 92 93 static inline struct metapage *page_to_mp(struct page *page, int offset) 94 { 95 if (!PagePrivate(page)) 96 return NULL; 97 return mp_anchor(page)->mp[offset >> L2PSIZE]; 98 } 99 100 static inline int insert_metapage(struct page *page, struct metapage *mp) 101 { 102 struct meta_anchor *a; 103 int index; 104 int l2mp_blocks; /* log2 blocks per metapage */ 105 106 if (PagePrivate(page)) 107 a = mp_anchor(page); 108 else { 109 a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS); 110 if (!a) 111 return -ENOMEM; 112 set_page_private(page, (unsigned long)a); 113 SetPagePrivate(page); 114 kmap(page); 115 } 116 117 if (mp) { 118 l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; 119 index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1); 120 a->mp_count++; 121 a->mp[index] = mp; 122 } 123 124 return 0; 125 } 126 127 static inline void remove_metapage(struct page *page, struct metapage *mp) 128 { 129 struct meta_anchor *a = mp_anchor(page); 130 int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; 131 int index; 132 133 index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1); 134 135 BUG_ON(a->mp[index] != mp); 136 137 a->mp[index] = NULL; 138 if (--a->mp_count == 0) { 139 kfree(a); 140 set_page_private(page, 0); 141 ClearPagePrivate(page); 142 kunmap(page); 143 } 144 } 145 146 static inline void inc_io(struct page *page) 147 { 148 atomic_inc(&mp_anchor(page)->io_count); 149 } 150 151 static inline void dec_io(struct page *page, void (*handler) (struct page *)) 152 { 153 if (atomic_dec_and_test(&mp_anchor(page)->io_count)) 154 handler(page); 155 } 156 157 #else 158 static inline struct metapage *page_to_mp(struct page *page, int offset) 159 { 160 return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL; 161 } 162 163 static inline int insert_metapage(struct page *page, struct metapage *mp) 164 { 165 if (mp) { 166 set_page_private(page, (unsigned long)mp); 167 SetPagePrivate(page); 168 kmap(page); 169 } 170 return 0; 171 } 172 173 static inline void remove_metapage(struct page *page, struct metapage *mp) 174 { 175 set_page_private(page, 0); 176 ClearPagePrivate(page); 177 kunmap(page); 178 } 179 180 #define inc_io(page) do {} while(0) 181 #define dec_io(page, handler) handler(page) 182 183 #endif 184 185 static void init_once(void *foo) 186 { 187 struct metapage *mp = (struct metapage *)foo; 188 189 mp->lid = 0; 190 mp->lsn = 0; 191 mp->flag = 0; 192 mp->data = NULL; 193 mp->clsn = 0; 194 mp->log = NULL; 195 set_bit(META_free, &mp->flag); 196 init_waitqueue_head(&mp->wait); 197 } 198 199 static inline struct metapage *alloc_metapage(gfp_t gfp_mask) 200 { 201 return mempool_alloc(metapage_mempool, gfp_mask); 202 } 203 204 static inline void free_metapage(struct metapage *mp) 205 { 206 mp->flag = 0; 207 set_bit(META_free, &mp->flag); 208 209 mempool_free(mp, metapage_mempool); 210 } 211 212 int __init metapage_init(void) 213 { 214 /* 215 * Allocate the metapage structures 216 */ 217 metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage), 218 0, 0, init_once); 219 if (metapage_cache == NULL) 220 return -ENOMEM; 221 222 metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES, 223 metapage_cache); 224 225 if (metapage_mempool == NULL) { 226 kmem_cache_destroy(metapage_cache); 227 return -ENOMEM; 228 } 229 230 return 0; 231 } 232 233 void metapage_exit(void) 234 { 235 mempool_destroy(metapage_mempool); 236 kmem_cache_destroy(metapage_cache); 237 } 238 239 static inline void drop_metapage(struct page *page, struct metapage *mp) 240 { 241 if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) || 242 test_bit(META_io, &mp->flag)) 243 return; 244 remove_metapage(page, mp); 245 INCREMENT(mpStat.pagefree); 246 free_metapage(mp); 247 } 248 249 /* 250 * Metapage address space operations 251 */ 252 253 static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock, 254 int *len) 255 { 256 int rc = 0; 257 int xflag; 258 s64 xaddr; 259 sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 260 inode->i_blkbits; 261 262 if (lblock >= file_blocks) 263 return 0; 264 if (lblock + *len > file_blocks) 265 *len = file_blocks - lblock; 266 267 if (inode->i_ino) { 268 rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0); 269 if ((rc == 0) && *len) 270 lblock = (sector_t)xaddr; 271 else 272 lblock = 0; 273 } /* else no mapping */ 274 275 return lblock; 276 } 277 278 static void last_read_complete(struct page *page) 279 { 280 if (!PageError(page)) 281 SetPageUptodate(page); 282 unlock_page(page); 283 } 284 285 static void metapage_read_end_io(struct bio *bio, int err) 286 { 287 struct page *page = bio->bi_private; 288 289 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 290 printk(KERN_ERR "metapage_read_end_io: I/O error\n"); 291 SetPageError(page); 292 } 293 294 dec_io(page, last_read_complete); 295 bio_put(bio); 296 } 297 298 static void remove_from_logsync(struct metapage *mp) 299 { 300 struct jfs_log *log = mp->log; 301 unsigned long flags; 302 /* 303 * This can race. Recheck that log hasn't been set to null, and after 304 * acquiring logsync lock, recheck lsn 305 */ 306 if (!log) 307 return; 308 309 LOGSYNC_LOCK(log, flags); 310 if (mp->lsn) { 311 mp->log = NULL; 312 mp->lsn = 0; 313 mp->clsn = 0; 314 log->count--; 315 list_del(&mp->synclist); 316 } 317 LOGSYNC_UNLOCK(log, flags); 318 } 319 320 static void last_write_complete(struct page *page) 321 { 322 struct metapage *mp; 323 unsigned int offset; 324 325 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { 326 mp = page_to_mp(page, offset); 327 if (mp && test_bit(META_io, &mp->flag)) { 328 if (mp->lsn) 329 remove_from_logsync(mp); 330 clear_bit(META_io, &mp->flag); 331 } 332 /* 333 * I'd like to call drop_metapage here, but I don't think it's 334 * safe unless I have the page locked 335 */ 336 } 337 end_page_writeback(page); 338 } 339 340 static void metapage_write_end_io(struct bio *bio, int err) 341 { 342 struct page *page = bio->bi_private; 343 344 BUG_ON(!PagePrivate(page)); 345 346 if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) { 347 printk(KERN_ERR "metapage_write_end_io: I/O error\n"); 348 SetPageError(page); 349 } 350 dec_io(page, last_write_complete); 351 bio_put(bio); 352 } 353 354 static int metapage_writepage(struct page *page, struct writeback_control *wbc) 355 { 356 struct bio *bio = NULL; 357 int block_offset; /* block offset of mp within page */ 358 struct inode *inode = page->mapping->host; 359 int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage; 360 int len; 361 int xlen; 362 struct metapage *mp; 363 int redirty = 0; 364 sector_t lblock; 365 int nr_underway = 0; 366 sector_t pblock; 367 sector_t next_block = 0; 368 sector_t page_start; 369 unsigned long bio_bytes = 0; 370 unsigned long bio_offset = 0; 371 int offset; 372 int bad_blocks = 0; 373 374 page_start = (sector_t)page->index << 375 (PAGE_CACHE_SHIFT - inode->i_blkbits); 376 BUG_ON(!PageLocked(page)); 377 BUG_ON(PageWriteback(page)); 378 set_page_writeback(page); 379 380 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { 381 mp = page_to_mp(page, offset); 382 383 if (!mp || !test_bit(META_dirty, &mp->flag)) 384 continue; 385 386 if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) { 387 redirty = 1; 388 /* 389 * Make sure this page isn't blocked indefinitely. 390 * If the journal isn't undergoing I/O, push it 391 */ 392 if (mp->log && !(mp->log->cflag & logGC_PAGEOUT)) 393 jfs_flush_journal(mp->log, 0); 394 continue; 395 } 396 397 clear_bit(META_dirty, &mp->flag); 398 set_bit(META_io, &mp->flag); 399 block_offset = offset >> inode->i_blkbits; 400 lblock = page_start + block_offset; 401 if (bio) { 402 if (xlen && lblock == next_block) { 403 /* Contiguous, in memory & on disk */ 404 len = min(xlen, blocks_per_mp); 405 xlen -= len; 406 bio_bytes += len << inode->i_blkbits; 407 continue; 408 } 409 /* Not contiguous */ 410 if (bio_add_page(bio, page, bio_bytes, bio_offset) < 411 bio_bytes) 412 goto add_failed; 413 /* 414 * Increment counter before submitting i/o to keep 415 * count from hitting zero before we're through 416 */ 417 inc_io(page); 418 if (!bio->bi_size) 419 goto dump_bio; 420 submit_bio(WRITE, bio); 421 nr_underway++; 422 bio = NULL; 423 } else 424 inc_io(page); 425 xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits; 426 pblock = metapage_get_blocks(inode, lblock, &xlen); 427 if (!pblock) { 428 printk(KERN_ERR "JFS: metapage_get_blocks failed\n"); 429 /* 430 * We already called inc_io(), but can't cancel it 431 * with dec_io() until we're done with the page 432 */ 433 bad_blocks++; 434 continue; 435 } 436 len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage); 437 438 bio = bio_alloc(GFP_NOFS, 1); 439 bio->bi_bdev = inode->i_sb->s_bdev; 440 bio->bi_sector = pblock << (inode->i_blkbits - 9); 441 bio->bi_end_io = metapage_write_end_io; 442 bio->bi_private = page; 443 444 /* Don't call bio_add_page yet, we may add to this vec */ 445 bio_offset = offset; 446 bio_bytes = len << inode->i_blkbits; 447 448 xlen -= len; 449 next_block = lblock + len; 450 } 451 if (bio) { 452 if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes) 453 goto add_failed; 454 if (!bio->bi_size) 455 goto dump_bio; 456 457 submit_bio(WRITE, bio); 458 nr_underway++; 459 } 460 if (redirty) 461 redirty_page_for_writepage(wbc, page); 462 463 unlock_page(page); 464 465 if (bad_blocks) 466 goto err_out; 467 468 if (nr_underway == 0) 469 end_page_writeback(page); 470 471 return 0; 472 add_failed: 473 /* We should never reach here, since we're only adding one vec */ 474 printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n"); 475 goto skip; 476 dump_bio: 477 print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16, 478 4, bio, sizeof(*bio), 0); 479 skip: 480 bio_put(bio); 481 unlock_page(page); 482 dec_io(page, last_write_complete); 483 err_out: 484 while (bad_blocks--) 485 dec_io(page, last_write_complete); 486 return -EIO; 487 } 488 489 static int metapage_readpage(struct file *fp, struct page *page) 490 { 491 struct inode *inode = page->mapping->host; 492 struct bio *bio = NULL; 493 int block_offset; 494 int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; 495 sector_t page_start; /* address of page in fs blocks */ 496 sector_t pblock; 497 int xlen; 498 unsigned int len; 499 int offset; 500 501 BUG_ON(!PageLocked(page)); 502 page_start = (sector_t)page->index << 503 (PAGE_CACHE_SHIFT - inode->i_blkbits); 504 505 block_offset = 0; 506 while (block_offset < blocks_per_page) { 507 xlen = blocks_per_page - block_offset; 508 pblock = metapage_get_blocks(inode, page_start + block_offset, 509 &xlen); 510 if (pblock) { 511 if (!PagePrivate(page)) 512 insert_metapage(page, NULL); 513 inc_io(page); 514 if (bio) 515 submit_bio(READ, bio); 516 517 bio = bio_alloc(GFP_NOFS, 1); 518 bio->bi_bdev = inode->i_sb->s_bdev; 519 bio->bi_sector = pblock << (inode->i_blkbits - 9); 520 bio->bi_end_io = metapage_read_end_io; 521 bio->bi_private = page; 522 len = xlen << inode->i_blkbits; 523 offset = block_offset << inode->i_blkbits; 524 if (bio_add_page(bio, page, len, offset) < len) 525 goto add_failed; 526 block_offset += xlen; 527 } else 528 block_offset++; 529 } 530 if (bio) 531 submit_bio(READ, bio); 532 else 533 unlock_page(page); 534 535 return 0; 536 537 add_failed: 538 printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n"); 539 bio_put(bio); 540 dec_io(page, last_read_complete); 541 return -EIO; 542 } 543 544 static int metapage_releasepage(struct page *page, gfp_t gfp_mask) 545 { 546 struct metapage *mp; 547 int ret = 1; 548 int offset; 549 550 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { 551 mp = page_to_mp(page, offset); 552 553 if (!mp) 554 continue; 555 556 jfs_info("metapage_releasepage: mp = 0x%p", mp); 557 if (mp->count || mp->nohomeok || 558 test_bit(META_dirty, &mp->flag)) { 559 jfs_info("count = %ld, nohomeok = %d", mp->count, 560 mp->nohomeok); 561 ret = 0; 562 continue; 563 } 564 if (mp->lsn) 565 remove_from_logsync(mp); 566 remove_metapage(page, mp); 567 INCREMENT(mpStat.pagefree); 568 free_metapage(mp); 569 } 570 return ret; 571 } 572 573 static void metapage_invalidatepage(struct page *page, unsigned long offset) 574 { 575 BUG_ON(offset); 576 577 BUG_ON(PageWriteback(page)); 578 579 metapage_releasepage(page, 0); 580 } 581 582 const struct address_space_operations jfs_metapage_aops = { 583 .readpage = metapage_readpage, 584 .writepage = metapage_writepage, 585 .sync_page = block_sync_page, 586 .releasepage = metapage_releasepage, 587 .invalidatepage = metapage_invalidatepage, 588 .set_page_dirty = __set_page_dirty_nobuffers, 589 }; 590 591 struct metapage *__get_metapage(struct inode *inode, unsigned long lblock, 592 unsigned int size, int absolute, 593 unsigned long new) 594 { 595 int l2BlocksPerPage; 596 int l2bsize; 597 struct address_space *mapping; 598 struct metapage *mp = NULL; 599 struct page *page; 600 unsigned long page_index; 601 unsigned long page_offset; 602 603 jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d", 604 inode->i_ino, lblock, absolute); 605 606 l2bsize = inode->i_blkbits; 607 l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize; 608 page_index = lblock >> l2BlocksPerPage; 609 page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize; 610 if ((page_offset + size) > PAGE_CACHE_SIZE) { 611 jfs_err("MetaData crosses page boundary!!"); 612 jfs_err("lblock = %lx, size = %d", lblock, size); 613 dump_stack(); 614 return NULL; 615 } 616 if (absolute) 617 mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping; 618 else { 619 /* 620 * If an nfs client tries to read an inode that is larger 621 * than any existing inodes, we may try to read past the 622 * end of the inode map 623 */ 624 if ((lblock << inode->i_blkbits) >= inode->i_size) 625 return NULL; 626 mapping = inode->i_mapping; 627 } 628 629 if (new && (PSIZE == PAGE_CACHE_SIZE)) { 630 page = grab_cache_page(mapping, page_index); 631 if (!page) { 632 jfs_err("grab_cache_page failed!"); 633 return NULL; 634 } 635 SetPageUptodate(page); 636 } else { 637 page = read_mapping_page(mapping, page_index, NULL); 638 if (IS_ERR(page) || !PageUptodate(page)) { 639 jfs_err("read_mapping_page failed!"); 640 return NULL; 641 } 642 lock_page(page); 643 } 644 645 mp = page_to_mp(page, page_offset); 646 if (mp) { 647 if (mp->logical_size != size) { 648 jfs_error(inode->i_sb, 649 "__get_metapage: mp->logical_size != size"); 650 jfs_err("logical_size = %d, size = %d", 651 mp->logical_size, size); 652 dump_stack(); 653 goto unlock; 654 } 655 mp->count++; 656 lock_metapage(mp); 657 if (test_bit(META_discard, &mp->flag)) { 658 if (!new) { 659 jfs_error(inode->i_sb, 660 "__get_metapage: using a " 661 "discarded metapage"); 662 discard_metapage(mp); 663 goto unlock; 664 } 665 clear_bit(META_discard, &mp->flag); 666 } 667 } else { 668 INCREMENT(mpStat.pagealloc); 669 mp = alloc_metapage(GFP_NOFS); 670 mp->page = page; 671 mp->flag = 0; 672 mp->xflag = COMMIT_PAGE; 673 mp->count = 1; 674 mp->nohomeok = 0; 675 mp->logical_size = size; 676 mp->data = page_address(page) + page_offset; 677 mp->index = lblock; 678 if (unlikely(insert_metapage(page, mp))) { 679 free_metapage(mp); 680 goto unlock; 681 } 682 lock_metapage(mp); 683 } 684 685 if (new) { 686 jfs_info("zeroing mp = 0x%p", mp); 687 memset(mp->data, 0, PSIZE); 688 } 689 690 unlock_page(page); 691 jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data); 692 return mp; 693 694 unlock: 695 unlock_page(page); 696 return NULL; 697 } 698 699 void grab_metapage(struct metapage * mp) 700 { 701 jfs_info("grab_metapage: mp = 0x%p", mp); 702 page_cache_get(mp->page); 703 lock_page(mp->page); 704 mp->count++; 705 lock_metapage(mp); 706 unlock_page(mp->page); 707 } 708 709 void force_metapage(struct metapage *mp) 710 { 711 struct page *page = mp->page; 712 jfs_info("force_metapage: mp = 0x%p", mp); 713 set_bit(META_forcewrite, &mp->flag); 714 clear_bit(META_sync, &mp->flag); 715 page_cache_get(page); 716 lock_page(page); 717 set_page_dirty(page); 718 write_one_page(page, 1); 719 clear_bit(META_forcewrite, &mp->flag); 720 page_cache_release(page); 721 } 722 723 void hold_metapage(struct metapage *mp) 724 { 725 lock_page(mp->page); 726 } 727 728 void put_metapage(struct metapage *mp) 729 { 730 if (mp->count || mp->nohomeok) { 731 /* Someone else will release this */ 732 unlock_page(mp->page); 733 return; 734 } 735 page_cache_get(mp->page); 736 mp->count++; 737 lock_metapage(mp); 738 unlock_page(mp->page); 739 release_metapage(mp); 740 } 741 742 void release_metapage(struct metapage * mp) 743 { 744 struct page *page = mp->page; 745 jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag); 746 747 BUG_ON(!page); 748 749 lock_page(page); 750 unlock_metapage(mp); 751 752 assert(mp->count); 753 if (--mp->count || mp->nohomeok) { 754 unlock_page(page); 755 page_cache_release(page); 756 return; 757 } 758 759 if (test_bit(META_dirty, &mp->flag)) { 760 set_page_dirty(page); 761 if (test_bit(META_sync, &mp->flag)) { 762 clear_bit(META_sync, &mp->flag); 763 write_one_page(page, 1); 764 lock_page(page); /* write_one_page unlocks the page */ 765 } 766 } else if (mp->lsn) /* discard_metapage doesn't remove it */ 767 remove_from_logsync(mp); 768 769 /* Try to keep metapages from using up too much memory */ 770 drop_metapage(page, mp); 771 772 unlock_page(page); 773 page_cache_release(page); 774 } 775 776 void __invalidate_metapages(struct inode *ip, s64 addr, int len) 777 { 778 sector_t lblock; 779 int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits; 780 int BlocksPerPage = 1 << l2BlocksPerPage; 781 /* All callers are interested in block device's mapping */ 782 struct address_space *mapping = 783 JFS_SBI(ip->i_sb)->direct_inode->i_mapping; 784 struct metapage *mp; 785 struct page *page; 786 unsigned int offset; 787 788 /* 789 * Mark metapages to discard. They will eventually be 790 * released, but should not be written. 791 */ 792 for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len; 793 lblock += BlocksPerPage) { 794 page = find_lock_page(mapping, lblock >> l2BlocksPerPage); 795 if (!page) 796 continue; 797 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { 798 mp = page_to_mp(page, offset); 799 if (!mp) 800 continue; 801 if (mp->index < addr) 802 continue; 803 if (mp->index >= addr + len) 804 break; 805 806 clear_bit(META_dirty, &mp->flag); 807 set_bit(META_discard, &mp->flag); 808 if (mp->lsn) 809 remove_from_logsync(mp); 810 } 811 unlock_page(page); 812 page_cache_release(page); 813 } 814 } 815 816 #ifdef CONFIG_JFS_STATISTICS 817 static int jfs_mpstat_proc_show(struct seq_file *m, void *v) 818 { 819 seq_printf(m, 820 "JFS Metapage statistics\n" 821 "=======================\n" 822 "page allocations = %d\n" 823 "page frees = %d\n" 824 "lock waits = %d\n", 825 mpStat.pagealloc, 826 mpStat.pagefree, 827 mpStat.lockwait); 828 return 0; 829 } 830 831 static int jfs_mpstat_proc_open(struct inode *inode, struct file *file) 832 { 833 return single_open(file, jfs_mpstat_proc_show, NULL); 834 } 835 836 const struct file_operations jfs_mpstat_proc_fops = { 837 .owner = THIS_MODULE, 838 .open = jfs_mpstat_proc_open, 839 .read = seq_read, 840 .llseek = seq_lseek, 841 .release = single_release, 842 }; 843 #endif 844