// SPDX-License-Identifier: GPL-2.0+
/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}
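
/*
 * Usage sketch (hypothetical caller, for illustration only): a read
 * path would typically grab the buffer for a block offset, map and
 * read it as needed, then drop the page lock and references.  The
 * surrounding names below are placeholders, not code from this file:
 *
 *	struct buffer_head *bh;
 *
 *	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	...				(map @bh and submit a read here)
 *	unlock_page(bh->b_page);
 *	put_page(bh->b_page);
 *	...
 *	brelse(bh);			(drop the buffer reference last)
 */
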
/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The page must not be under I/O.
 * Both src and dst pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

/**
 * nilfs_copy_dirty_pages - copy dirty pages to another page cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Copies every page tagged dirty in @smap into @dmap, including the
 * dirty states of the pages and their buffer heads.
 *
 * Return: 0 on success, or %-ENOMEM if a destination page could not be
 * allocated.
 */
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		put_page(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}
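
/*
 * Usage sketch (hypothetical, for illustration): mirroring the dirty
 * pages of a metadata file into a separate shadow mapping before an
 * operation that may need to be rolled back could look like this; the
 * caller must ensure no pages are added to the source cache while the
 * copy runs.  "shadow_mapping" is a placeholder name:
 *
 *	err = nilfs_copy_dirty_pages(shadow_mapping, inode->i_mapping);
 *	if (unlikely(err))
 *		goto failed;		(only -ENOMEM is possible here)
 */
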
/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec);
repeat:
	n = pagevec_lookup(&pvec, smap, &index);
	if (!n)
		return;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* overwrite the existing page in the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			put_page(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			xa_lock_irq(&smap->i_pages);
			page2 = radix_tree_delete(&smap->i_pages, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			err = radix_tree_insert(&dmap->i_pages, offset, page);
			if (unlikely(err < 0)) {
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				put_page(page);	/* for cache */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->i_pages,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec);

	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			nilfs_clear_dirty_page(page, silent);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_msg(sb, KERN_WARNING,
			  "discard dirty page: offset=%lld, ino=%lu",
			  page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_msg(sb, KERN_WARNING,
					  "discard dirty block: blocknr=%llu, size=%zu",
					  (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}
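
/*
 * Usage sketch (hypothetical, for illustration): discarding all dirty
 * state from a mapping whose contents are about to be restored from a
 * saved copy.  @silent selects whether each discarded page and block
 * is reported via nilfs_msg():
 *
 *	nilfs_clear_dirty_pages(inode->i_mapping, true);
 */
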
/**
 * nilfs_page_count_clean_buffers - count clean buffers in a byte range
 * @page: page to be checked
 * @from: start offset within the page, in bytes
 * @to: end offset within the page, in bytes
 *
 * Return: the number of buffer heads on @page that overlap the range
 * [@from, @to) and are not dirty.
 */
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->a_ops = &empty_aops;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			radix_tree_tag_clear(&mapping->i_pages,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}
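
/*
 * Illustration of case 2) above: when a disposed buffer leaves its page
 * with no dirty buffers, the page's dirty state is cancelled the same
 * way nilfs_forget_buffer() does earlier in this file:
 *
 *	if (nilfs_page_buffers_clean(page))
 *		__nilfs_clear_page_dirty(page);
 */
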
/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk. If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks. Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i;
	pgoff_t index;
	unsigned int nblocks_in_page;
	unsigned long length = 0;
	sector_t b;
	struct pagevec pvec;
	struct page *page;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec);

repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
					pvec.pages);
	if (pvec.nr == 0)
		return length;

	if (length > 0 && pvec.pages[0]->index > index)
		goto out;

	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
	i = 0;
	do {
		page = pvec.pages[i];

		lock_page(page);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;

			b += nblocks_in_page;
		}
		unlock_page(page);

	} while (++i < pagevec_count(&pvec));

	index = page->index + 1;
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;

out_locked:
	unlock_page(page);
out:
	pagevec_release(&pvec);
	return length;
}
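
/*
 * Usage sketch (hypothetical caller, for illustration): reporting runs
 * of delayed-allocation blocks, e.g. from a fiemap-style handler:
 *
 *	sector_t blkoff;
 *	unsigned long n;
 *
 *	n = nilfs_find_uncommitted_extent(inode, start_blk, &blkoff);
 *	if (n > 0)
 *		...	(an uncommitted extent of n blocks starts at blkoff)
 */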