// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within the valid node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* it allows 20% / total_ram for inmemory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark, or the cached
		 * page count exceeds the threshold, deny caching compressed
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
				free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
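
/*
 * Worked example for the thresholds above (illustrative only, assuming
 * 4KiB pages, 1GiB of low memory (avail_ram = 262144 pages) and the
 * assumed default ram_thresh of 10): the FREE_NIDS branch allows cached
 * free nids to grow to (262144 * 10 / 100) >> 2 = 6553 pages (~25MiB)
 * before this function starts returning false.
 */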

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}
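
/*
 * Note on the cache layout above (descriptive, not normative): clean
 * nat entries live in the nat_root radix tree and on the nat_entries
 * LRU list, which f2fs_try_to_free_nats() shrinks from the head; once
 * an entry is dirtied, __set_nat_cache_dirty() below moves it into a
 * per-NAT-block nat_entry_set so that checkpoint can flush all dirty
 * entries of one NAT block together.
 */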

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
						GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the conditions below:
	 * 1. update NEW_ADDR to a valid block address;
	 * 2. update an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
					GFP_NOFS, true, NULL);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}
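
/*
 * Sketch of the fsync node entry lifecycle (derived from the helpers
 * above): __write_node_page() adds every warm dnode under writeback
 * via f2fs_add_fsync_node_entry() and hands the monotonically
 * increasing seq_id back to the fsync path, which then waits in
 * f2fs_wait_on_node_pages_writeback() for all entries up to that
 * seq_id; f2fs_del_fsync_node_entry() drops an entry once its page
 * completes writeback.
 */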

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry may
		 * remain in the nat cache, so reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab
	 * journal_rwsem first. This rwsem is on the critical path of
	 * checkpoint, which also takes nat_tree_lock above. Therefore,
	 * if we failed to grab it here, retry instead of blocking
	 * checkpoint.
	 */
	if (!rwsem_is_locked(&sbi->cp_global_sem)) {
		down_read(&curseg->journal_rwsem);
	} else if (!down_read_trylock(&curseg->journal_rwsem)) {
		up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
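
/*
 * Worked example for f2fs_get_next_page_offset() (illustrative): if
 * the lookup failed at cur_level == max_level (the dnode itself is
 * missing), skipped_unit stays ADDRS_PER_BLOCK and the result is the
 * first offset covered by the next dnode; if it failed one level
 * higher (cur_level == max_level - 1, a missing indirect node),
 * skipped_unit becomes ADDRS_PER_BLOCK * NIDS_PER_BLOCK, so the whole
 * missing subtree is skipped in one step.
 */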

/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
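
/*
 * Worked example for get_node_path() (illustrative, using the symbolic
 * sizes above): for block == direct_index + 5, the path is level 1
 * with offset[0] = NODE_DIR1_BLOCK, offset[1] = 5 and noffset[1] = 1;
 * for block == direct_index + 2 * direct_blks + 7, the path is level 2
 * with offset[0] = NODE_IND1_BLOCK, offset[1] = 0, offset[2] = 7,
 * noffset[1] = 3 and noffset[2] = 4.
 */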

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
		block_t blkaddr;

		if (!c_len)
			goto out;

		blkaddr = f2fs_data_blkaddr(dn);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + 1);

		f2fs_update_extent_tree_range_compressed(dn->inode,
					index, blkaddr,
					F2FS_I(dn->inode)->i_cluster_size,
					c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
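
/*
 * Typical lookup pattern (an illustrative sketch mirroring callers
 * such as f2fs_remove_inode_page() below):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	... use dn.node_page, dn.ofs_in_node, dn.data_blkaddr ...
 *	f2fs_put_dnode(&dn);
 */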

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err)
		return err;

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
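
/*
 * Return-value convention of the truncate helpers above (descriptive
 * note): truncate_dnode() returns 1 per freed (or already missing)
 * dnode, and truncate_nodes() returns the number of node blocks freed
 * in the subtree; a fully freed single-indirect subtree counts as
 * NIDS_PER_BLOCK + 1 (all dnodes plus the indirect node itself), which
 * is the value the caller checks before clearing the parent's nid slot.
 */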

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}
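
/*
 * Illustrative call flow (a sketch of how the pieces above fit
 * together): the fsync path is expected to call
 * f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id) and then
 * f2fs_wait_on_node_pages_writeback(sbi, seq_id), so that it returns
 * only after every dnode written for this fsync has completed
 * writeback.
 */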

static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}

static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}

void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct pagevec pvec;
	int nr_pages;

	pagevec_init(&pvec);

	while ((nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (!IS_DNODE(page))
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;
			bool may_dirty = true;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && may_dirty) {
				may_dirty = false;
				if (flush_dirty_inode(page))
					goto lock_node;
			}
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);

		if (ret)
			break;
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		set_page_private_reference(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
							nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);

	if (err)
		return err;

	nm_i->nid_cnt[FREE_NID]++;
	list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}
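
/*
 * Free nid state machine (a summary sketch based on the helpers
 * above): a scanned nid enters as FREE_NID via __insert_free_nid();
 * f2fs_alloc_nid() moves it FREE_NID -> PREALLOC_NID; on success
 * f2fs_alloc_nid_done() removes it for good, while
 * f2fs_alloc_nid_failed() moves it back PREALLOC_NID -> FREE_NID.
 */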
bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i;
	bool ret = true;

	down_read(&nm_i->nat_tree_lock);
	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
			ret = false;
			break;
		}
	}
	up_read(&nm_i->nat_tree_lock);

	return ret;
}

static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}
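
/*
 * Worked example (editor's illustration; the exact constant depends on
 * the build): with 4KB blocks and 9-byte packed NAT entries,
 * NAT_ENTRY_PER_BLOCK is 4096 / 9 = 455. For nid 1000:
 *
 *	nat_ofs = NAT_BLOCK_OFFSET(1000) = 1000 / 455       = 2
 *	nid_ofs = 1000 - START_NID(1000) = 1000 - 2 * 455   = 90
 *
 * i.e. bit 90 of the per-block bitmap free_nid_bitmap[2] tracks nid
 * 1000.
 */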
/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
		 *    - f2fs_alloc_nid
		 *     - __insert_nid_to_list(PREALLOC_NID)
		 *                     - f2fs_balance_fs_bg
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *   - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EINVAL;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}

static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}

static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);
}
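
/*
 * Editor's summary (not in the original file): free nids are discovered
 * from three sources, cheapest first, all funnelling into
 * add_free_nid():
 *
 *	1. scan_free_nid_bits()  - in-memory free_nid_bitmap, no I/O
 *	2. scan_nat_page()       - on-disk NAT blocks whose entries are
 *				   NULL_ADDR
 *	3. scan_curseg_cache()   - NAT journal in the hot data summary,
 *				   which overrides what the NAT blocks say
 *
 * The journal pass runs last in both scan_free_nid_bits() above and
 * __f2fs_build_free_nids() below, so its view wins.
 */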
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				up_read(&nm_i->nat_tree_lock);
				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
				return ret;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find more free nids */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}

int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
		return false;
	}

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and their caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}
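
/*
 * Illustrative usage sketch (editor's addition, modelled on the inode
 * creation path; error handling is abbreviated and "setup_failed" is a
 * hypothetical condition):
 *
 *	nid_t ino;
 *
 *	if (!f2fs_alloc_nid(sbi, &ino))
 *		return -ENOSPC;
 *	...
 *	if (setup_failed) {
 *		f2fs_alloc_nid_failed(sbi, ino);	// give the nid back
 *		return err;
 *	}
 *	f2fs_alloc_nid_done(sbi, ino);			// nid is committed
 *
 * Exactly one of f2fs_alloc_nid_done()/f2fs_alloc_nid_failed() (both
 * below) must follow a successful f2fs_alloc_nid().
 */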
/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		unsigned int batch = SHRINK_NID_BATCH_SIZE;

		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
	}

	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}
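
/*
 * Editor's note (assumption based on the f2fs shrinker in this tree):
 * f2fs_try_to_free_nids() is the memory-shrinker entry point for the
 * free nid cache. It trims in SHRINK_NID_BATCH_SIZE chunks, dropping
 * and retaking nid_list_lock between batches so that concurrent
 * allocators are not starved while a large cache is reclaimed, and
 * returns the number of entries actually freed.
 */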
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);

	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	return 0;
}

int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
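
/*
 * Editor's note (context, not from the original file): the three
 * f2fs_recover_*() helpers above are invoked from the roll-forward
 * recovery path (fs/f2fs/recovery.c) after a sudden power cut. `page`
 * is the fsync'ed node block found on disk; the helpers rebuild the
 * inode from it step by step:
 *
 *	f2fs_recover_inode_page()   - recreate a bare in-memory inode page
 *	f2fs_recover_inline_xattr() - copy back the inline xattr payload
 *	f2fs_recover_xattr_data()   - reallocate the external xattr node
 */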
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = bio_max_segs(last_offset - i);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(sbi, nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in journal has not been used after last
		 * checkpoint, we should remove it from available nids,
		 * since later we will add it again.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
							unsigned int valid)
{
	if (valid == 0) {
		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
}

static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
			struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
			valid++;
	}

	__update_nat_bits(nm_i, nat_index, valid);
}
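
/*
 * Editor's illustration (derived from __update_nat_bits() above): the
 * two nat_bits bitmaps classify every NAT block by its count of valid
 * (non-NULL_ADDR) entries, so mount can skip reading blocks whose
 * contents are already implied:
 *
 *	valid entries			empty_nat_bits	full_nat_bits
 *	0				1		0
 *	1 .. NAT_ENTRY_PER_BLOCK - 1	0		0
 *	NAT_ENTRY_PER_BLOCK		0		1
 *
 * An "empty" block means every nid in it is free; a "full" block means
 * none are.
 */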
void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs;

	down_read(&nm_i->nat_tree_lock);

	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
		unsigned int valid = 0, nid_ofs = 0;

		/* handle nid zero specially, since it should never be used */
		if (unlikely(nat_ofs == 0)) {
			valid = 1;
			nid_ofs = 1;
		}

		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
			if (!test_bit_le(nid_ofs,
					nm_i->free_nid_bitmap[nat_ofs]))
				valid++;
		}

		__update_nat_bits(nm_i, nat_ofs, valid);
	}

	up_read(&nm_i->nat_tree_lock);
}

static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if ((cpc->reason & CP_UMOUNT) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}
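
/*
 * Editor's note (reasoning recovered from the checks above): small sets
 * go to the NAT journal to avoid rewriting a whole NAT block on every
 * checkpoint; a set is forced out to its NAT page when the journal has
 * no room, or when the checkpoint is for umount (CP_UMOUNT), since the
 * nat_bits written at umount are only kept accurate by update_nat_bits()
 * on real NAT block writes.
 */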
/*
 * This function is called during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (cpc->reason & CP_UMOUNT) {
		down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (cpc->reason & CP_UMOUNT ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}

static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return 0;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
		return 0;
	}

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}
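
/*
 * Editor's illustration of the nat_bits layout parsed above (sizes are
 * an example, assuming 4KB blocks; N = nat_blocks / BITS_PER_BYTE):
 *
 *	byte 0              8               8+N               2*N+8
 *	+-------------------+---------------+-----------------+
 *	| cp_ver | crc<<32  | full_nat_bits | empty_nat_bits  |
 *	+-------------------+---------------+-----------------+
 *	      (__le64)          N bytes          N bytes
 *
 * e.g. nat_blocks = 8192 gives N = 1024, so (2 * 1024 + 8) bytes round
 * up (F2FS_BLK_ALIGN) to a single 4KB block, stored at the tail of the
 * checkpoint segment.
 */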
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}
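
/*
 * Worked example (editor's addition; the numbers are illustrative):
 * with segment_count_nat = 32 and log_blocks_per_seg = 9 (512 blocks
 * per segment), the geometry computed above gives
 *
 *	nat_segs   = 32 / 2      = 16
 *	nat_blocks = 16 << 9     = 8192
 *	max_nid    = 455 * 8192  = 3,727,360 nids
 *
 * assuming NAT_ENTRY_PER_BLOCK == 455 (4KB block / 9-byte NAT entry).
 */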
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}

void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when a cp_error has occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}
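
/*
 * Editor's usage note (assumption based on the module init code in
 * fs/f2fs/super.c): the cache constructor/destructor pair above is
 * called once per module lifetime, not per mount, roughly:
 *
 *	static int __init init_f2fs_fs(void)
 *	{
 *		...
 *		err = f2fs_create_node_manager_caches();
 *		if (err)
 *			goto free_prior_caches;	// hypothetical label
 *		...
 *	}
 *
 * with f2fs_destroy_node_manager_caches() undoing it on module exit or
 * on a later init failure.
 */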