1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * fs/f2fs/node.c 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8 #include <linux/fs.h> 9 #include <linux/f2fs_fs.h> 10 #include <linux/mpage.h> 11 #include <linux/backing-dev.h> 12 #include <linux/blkdev.h> 13 #include <linux/pagevec.h> 14 #include <linux/swap.h> 15 16 #include "f2fs.h" 17 #include "node.h" 18 #include "segment.h" 19 #include "xattr.h" 20 #include "trace.h" 21 #include <trace/events/f2fs.h> 22 23 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock) 24 25 static struct kmem_cache *nat_entry_slab; 26 static struct kmem_cache *free_nid_slab; 27 static struct kmem_cache *nat_entry_set_slab; 28 static struct kmem_cache *fsync_node_entry_slab; 29 30 /* 31 * Check whether the given nid is within node id range. 32 */ 33 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) 34 { 35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) { 36 set_sbi_flag(sbi, SBI_NEED_FSCK); 37 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.", 38 __func__, nid); 39 return -EFSCORRUPTED; 40 } 41 return 0; 42 } 43 44 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) 45 { 46 struct f2fs_nm_info *nm_i = NM_I(sbi); 47 struct sysinfo val; 48 unsigned long avail_ram; 49 unsigned long mem_size = 0; 50 bool res = false; 51 52 si_meminfo(&val); 53 54 /* only uses low memory */ 55 avail_ram = val.totalram - val.totalhigh; 56 57 /* 58 * give 25%, 25%, 50%, 50%, 50% memory for each components respectively 59 */ 60 if (type == FREE_NIDS) { 61 mem_size = (nm_i->nid_cnt[FREE_NID] * 62 sizeof(struct free_nid)) >> PAGE_SHIFT; 63 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); 64 } else if (type == NAT_ENTRIES) { 65 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 66 PAGE_SHIFT; 67 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); 68 if (excess_cached_nats(sbi)) 69 res = false; 70 } else if (type == DIRTY_DENTS) { 71 if (sbi->sb->s_bdi->wb.dirty_exceeded) 72 return false; 73 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); 74 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); 75 } else if (type == INO_ENTRIES) { 76 int i; 77 78 for (i = 0; i < MAX_INO_ENTRY; i++) 79 mem_size += sbi->im[i].ino_num * 80 sizeof(struct ino_entry); 81 mem_size >>= PAGE_SHIFT; 82 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); 83 } else if (type == EXTENT_CACHE) { 84 mem_size = (atomic_read(&sbi->total_ext_tree) * 85 sizeof(struct extent_tree) + 86 atomic_read(&sbi->total_ext_node) * 87 sizeof(struct extent_node)) >> PAGE_SHIFT; 88 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); 89 } else if (type == INMEM_PAGES) { 90 /* it allows 20% / total_ram for inmemory pages */ 91 mem_size = get_pages(sbi, F2FS_INMEM_PAGES); 92 res = mem_size < (val.totalram / 5); 93 } else { 94 if (!sbi->sb->s_bdi->wb.dirty_exceeded) 95 return true; 96 } 97 return res; 98 } 99 100 static void clear_node_page_dirty(struct page *page) 101 { 102 if (PageDirty(page)) { 103 f2fs_clear_page_cache_dirty_tag(page); 104 clear_page_dirty_for_io(page); 105 dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); 106 } 107 ClearPageUptodate(page); 108 } 109 110 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) 111 { 112 return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid)); 113 } 114 115 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) 116 { 117 struct page *src_page; 118 struct page *dst_page; 119 pgoff_t dst_off; 120 void *src_addr; 121 void *dst_addr; 122 struct f2fs_nm_info *nm_i = NM_I(sbi); 123 124 dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid)); 125 126 /* get current nat block page with lock */ 127 src_page = get_current_nat_page(sbi, nid); 128 if (IS_ERR(src_page)) 129 return src_page; 130 dst_page = f2fs_grab_meta_page(sbi, dst_off); 131 f2fs_bug_on(sbi, PageDirty(src_page)); 132 133 src_addr = page_address(src_page); 134 dst_addr = page_address(dst_page); 135 memcpy(dst_addr, src_addr, PAGE_SIZE); 136 set_page_dirty(dst_page); 137 f2fs_put_page(src_page, 1); 138 139 set_to_next_nat(nm_i, nid); 140 141 return dst_page; 142 } 143 144 static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail) 145 { 146 struct nat_entry *new; 147 148 if (no_fail) 149 new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO); 150 else 151 new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO); 152 if (new) { 153 nat_set_nid(new, nid); 154 nat_reset_flag(new); 155 } 156 return new; 157 } 158 159 static void __free_nat_entry(struct nat_entry *e) 160 { 161 kmem_cache_free(nat_entry_slab, e); 162 } 163 164 /* must be locked by nat_tree_lock */ 165 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, 166 struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail) 167 { 168 if (no_fail) 169 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne); 170 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne)) 171 return NULL; 172 173 if (raw_ne) 174 node_info_from_raw_nat(&ne->ni, raw_ne); 175 176 spin_lock(&nm_i->nat_list_lock); 177 list_add_tail(&ne->list, &nm_i->nat_entries); 178 spin_unlock(&nm_i->nat_list_lock); 179 180 nm_i->nat_cnt++; 181 return ne; 182 } 183 184 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) 185 { 186 struct nat_entry *ne; 187 188 ne = radix_tree_lookup(&nm_i->nat_root, n); 189 190 /* for recent accessed nat entry, move it to tail of lru list */ 191 if (ne && !get_nat_flag(ne, IS_DIRTY)) { 192 spin_lock(&nm_i->nat_list_lock); 193 if (!list_empty(&ne->list)) 194 list_move_tail(&ne->list, &nm_i->nat_entries); 195 spin_unlock(&nm_i->nat_list_lock); 196 } 197 198 return ne; 199 } 200 201 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, 202 nid_t start, unsigned int nr, struct nat_entry **ep) 203 { 204 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); 205 } 206 207 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) 208 { 209 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); 210 nm_i->nat_cnt--; 211 __free_nat_entry(e); 212 } 213 214 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i, 215 struct nat_entry *ne) 216 { 217 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); 218 struct nat_entry_set *head; 219 220 head = radix_tree_lookup(&nm_i->nat_set_root, set); 221 if (!head) { 222 head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS); 223 224 INIT_LIST_HEAD(&head->entry_list); 225 INIT_LIST_HEAD(&head->set_list); 226 head->set = set; 227 head->entry_cnt = 0; 228 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head); 229 } 230 return head; 231 } 232 233 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, 234 struct nat_entry *ne) 235 { 236 struct nat_entry_set *head; 237 bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR; 238 239 if (!new_ne) 240 head = __grab_nat_entry_set(nm_i, ne); 241 242 /* 243 * update entry_cnt in below condition: 244 * 1. update NEW_ADDR to valid block address; 245 * 2. update old block address to new one; 246 */ 247 if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) || 248 !get_nat_flag(ne, IS_DIRTY))) 249 head->entry_cnt++; 250 251 set_nat_flag(ne, IS_PREALLOC, new_ne); 252 253 if (get_nat_flag(ne, IS_DIRTY)) 254 goto refresh_list; 255 256 nm_i->dirty_nat_cnt++; 257 set_nat_flag(ne, IS_DIRTY, true); 258 refresh_list: 259 spin_lock(&nm_i->nat_list_lock); 260 if (new_ne) 261 list_del_init(&ne->list); 262 else 263 list_move_tail(&ne->list, &head->entry_list); 264 spin_unlock(&nm_i->nat_list_lock); 265 } 266 267 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, 268 struct nat_entry_set *set, struct nat_entry *ne) 269 { 270 spin_lock(&nm_i->nat_list_lock); 271 list_move_tail(&ne->list, &nm_i->nat_entries); 272 spin_unlock(&nm_i->nat_list_lock); 273 274 set_nat_flag(ne, IS_DIRTY, false); 275 set->entry_cnt--; 276 nm_i->dirty_nat_cnt--; 277 } 278 279 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, 280 nid_t start, unsigned int nr, struct nat_entry_set **ep) 281 { 282 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep, 283 start, nr); 284 } 285 286 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page) 287 { 288 return NODE_MAPPING(sbi) == page->mapping && 289 IS_DNODE(page) && is_cold_node(page); 290 } 291 292 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi) 293 { 294 spin_lock_init(&sbi->fsync_node_lock); 295 INIT_LIST_HEAD(&sbi->fsync_node_list); 296 sbi->fsync_seg_id = 0; 297 sbi->fsync_node_num = 0; 298 } 299 300 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi, 301 struct page *page) 302 { 303 struct fsync_node_entry *fn; 304 unsigned long flags; 305 unsigned int seq_id; 306 307 fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS); 308 309 get_page(page); 310 fn->page = page; 311 INIT_LIST_HEAD(&fn->list); 312 313 spin_lock_irqsave(&sbi->fsync_node_lock, flags); 314 list_add_tail(&fn->list, &sbi->fsync_node_list); 315 fn->seq_id = sbi->fsync_seg_id++; 316 seq_id = fn->seq_id; 317 sbi->fsync_node_num++; 318 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); 319 320 return seq_id; 321 } 322 323 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page) 324 { 325 struct fsync_node_entry *fn; 326 unsigned long flags; 327 328 spin_lock_irqsave(&sbi->fsync_node_lock, flags); 329 list_for_each_entry(fn, &sbi->fsync_node_list, list) { 330 if (fn->page == page) { 331 list_del(&fn->list); 332 sbi->fsync_node_num--; 333 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); 334 kmem_cache_free(fsync_node_entry_slab, fn); 335 put_page(page); 336 return; 337 } 338 } 339 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); 340 f2fs_bug_on(sbi, 1); 341 } 342 343 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi) 344 { 345 unsigned long flags; 346 347 spin_lock_irqsave(&sbi->fsync_node_lock, flags); 348 sbi->fsync_seg_id = 0; 349 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); 350 } 351 352 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) 353 { 354 struct f2fs_nm_info *nm_i = NM_I(sbi); 355 struct nat_entry *e; 356 bool need = false; 357 358 down_read(&nm_i->nat_tree_lock); 359 e = __lookup_nat_cache(nm_i, nid); 360 if (e) { 361 if (!get_nat_flag(e, IS_CHECKPOINTED) && 362 !get_nat_flag(e, HAS_FSYNCED_INODE)) 363 need = true; 364 } 365 up_read(&nm_i->nat_tree_lock); 366 return need; 367 } 368 369 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) 370 { 371 struct f2fs_nm_info *nm_i = NM_I(sbi); 372 struct nat_entry *e; 373 bool is_cp = true; 374 375 down_read(&nm_i->nat_tree_lock); 376 e = __lookup_nat_cache(nm_i, nid); 377 if (e && !get_nat_flag(e, IS_CHECKPOINTED)) 378 is_cp = false; 379 up_read(&nm_i->nat_tree_lock); 380 return is_cp; 381 } 382 383 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) 384 { 385 struct f2fs_nm_info *nm_i = NM_I(sbi); 386 struct nat_entry *e; 387 bool need_update = true; 388 389 down_read(&nm_i->nat_tree_lock); 390 e = __lookup_nat_cache(nm_i, ino); 391 if (e && get_nat_flag(e, HAS_LAST_FSYNC) && 392 (get_nat_flag(e, IS_CHECKPOINTED) || 393 get_nat_flag(e, HAS_FSYNCED_INODE))) 394 need_update = false; 395 up_read(&nm_i->nat_tree_lock); 396 return need_update; 397 } 398 399 /* must be locked by nat_tree_lock */ 400 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, 401 struct f2fs_nat_entry *ne) 402 { 403 struct f2fs_nm_info *nm_i = NM_I(sbi); 404 struct nat_entry *new, *e; 405 406 new = __alloc_nat_entry(nid, false); 407 if (!new) 408 return; 409 410 down_write(&nm_i->nat_tree_lock); 411 e = __lookup_nat_cache(nm_i, nid); 412 if (!e) 413 e = __init_nat_entry(nm_i, new, ne, false); 414 else 415 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || 416 nat_get_blkaddr(e) != 417 le32_to_cpu(ne->block_addr) || 418 nat_get_version(e) != ne->version); 419 up_write(&nm_i->nat_tree_lock); 420 if (e != new) 421 __free_nat_entry(new); 422 } 423 424 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, 425 block_t new_blkaddr, bool fsync_done) 426 { 427 struct f2fs_nm_info *nm_i = NM_I(sbi); 428 struct nat_entry *e; 429 struct nat_entry *new = __alloc_nat_entry(ni->nid, true); 430 431 down_write(&nm_i->nat_tree_lock); 432 e = __lookup_nat_cache(nm_i, ni->nid); 433 if (!e) { 434 e = __init_nat_entry(nm_i, new, NULL, true); 435 copy_node_info(&e->ni, ni); 436 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); 437 } else if (new_blkaddr == NEW_ADDR) { 438 /* 439 * when nid is reallocated, 440 * previous nat entry can be remained in nat cache. 441 * So, reinitialize it with new information. 442 */ 443 copy_node_info(&e->ni, ni); 444 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); 445 } 446 /* let's free early to reduce memory consumption */ 447 if (e != new) 448 __free_nat_entry(new); 449 450 /* sanity check */ 451 f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); 452 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR && 453 new_blkaddr == NULL_ADDR); 454 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR && 455 new_blkaddr == NEW_ADDR); 456 f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) && 457 new_blkaddr == NEW_ADDR); 458 459 /* increment version no as node is removed */ 460 if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { 461 unsigned char version = nat_get_version(e); 462 nat_set_version(e, inc_node_version(version)); 463 } 464 465 /* change address */ 466 nat_set_blkaddr(e, new_blkaddr); 467 if (!__is_valid_data_blkaddr(new_blkaddr)) 468 set_nat_flag(e, IS_CHECKPOINTED, false); 469 __set_nat_cache_dirty(nm_i, e); 470 471 /* update fsync_mark if its inode nat entry is still alive */ 472 if (ni->nid != ni->ino) 473 e = __lookup_nat_cache(nm_i, ni->ino); 474 if (e) { 475 if (fsync_done && ni->nid == ni->ino) 476 set_nat_flag(e, HAS_FSYNCED_INODE, true); 477 set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); 478 } 479 up_write(&nm_i->nat_tree_lock); 480 } 481 482 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) 483 { 484 struct f2fs_nm_info *nm_i = NM_I(sbi); 485 int nr = nr_shrink; 486 487 if (!down_write_trylock(&nm_i->nat_tree_lock)) 488 return 0; 489 490 spin_lock(&nm_i->nat_list_lock); 491 while (nr_shrink) { 492 struct nat_entry *ne; 493 494 if (list_empty(&nm_i->nat_entries)) 495 break; 496 497 ne = list_first_entry(&nm_i->nat_entries, 498 struct nat_entry, list); 499 list_del(&ne->list); 500 spin_unlock(&nm_i->nat_list_lock); 501 502 __del_from_nat_cache(nm_i, ne); 503 nr_shrink--; 504 505 spin_lock(&nm_i->nat_list_lock); 506 } 507 spin_unlock(&nm_i->nat_list_lock); 508 509 up_write(&nm_i->nat_tree_lock); 510 return nr - nr_shrink; 511 } 512 513 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 514 struct node_info *ni) 515 { 516 struct f2fs_nm_info *nm_i = NM_I(sbi); 517 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 518 struct f2fs_journal *journal = curseg->journal; 519 nid_t start_nid = START_NID(nid); 520 struct f2fs_nat_block *nat_blk; 521 struct page *page = NULL; 522 struct f2fs_nat_entry ne; 523 struct nat_entry *e; 524 pgoff_t index; 525 block_t blkaddr; 526 int i; 527 528 ni->nid = nid; 529 530 /* Check nat cache */ 531 down_read(&nm_i->nat_tree_lock); 532 e = __lookup_nat_cache(nm_i, nid); 533 if (e) { 534 ni->ino = nat_get_ino(e); 535 ni->blk_addr = nat_get_blkaddr(e); 536 ni->version = nat_get_version(e); 537 up_read(&nm_i->nat_tree_lock); 538 return 0; 539 } 540 541 memset(&ne, 0, sizeof(struct f2fs_nat_entry)); 542 543 /* Check current segment summary */ 544 down_read(&curseg->journal_rwsem); 545 i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0); 546 if (i >= 0) { 547 ne = nat_in_journal(journal, i); 548 node_info_from_raw_nat(ni, &ne); 549 } 550 up_read(&curseg->journal_rwsem); 551 if (i >= 0) { 552 up_read(&nm_i->nat_tree_lock); 553 goto cache; 554 } 555 556 /* Fill node_info from nat page */ 557 index = current_nat_addr(sbi, nid); 558 up_read(&nm_i->nat_tree_lock); 559 560 page = f2fs_get_meta_page(sbi, index); 561 if (IS_ERR(page)) 562 return PTR_ERR(page); 563 564 nat_blk = (struct f2fs_nat_block *)page_address(page); 565 ne = nat_blk->entries[nid - start_nid]; 566 node_info_from_raw_nat(ni, &ne); 567 f2fs_put_page(page, 1); 568 cache: 569 blkaddr = le32_to_cpu(ne.block_addr); 570 if (__is_valid_data_blkaddr(blkaddr) && 571 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) 572 return -EFAULT; 573 574 /* cache nat entry */ 575 cache_nat_entry(sbi, nid, &ne); 576 return 0; 577 } 578 579 /* 580 * readahead MAX_RA_NODE number of node pages. 581 */ 582 static void f2fs_ra_node_pages(struct page *parent, int start, int n) 583 { 584 struct f2fs_sb_info *sbi = F2FS_P_SB(parent); 585 struct blk_plug plug; 586 int i, end; 587 nid_t nid; 588 589 blk_start_plug(&plug); 590 591 /* Then, try readahead for siblings of the desired node */ 592 end = start + n; 593 end = min(end, NIDS_PER_BLOCK); 594 for (i = start; i < end; i++) { 595 nid = get_nid(parent, i, false); 596 f2fs_ra_node_page(sbi, nid); 597 } 598 599 blk_finish_plug(&plug); 600 } 601 602 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) 603 { 604 const long direct_index = ADDRS_PER_INODE(dn->inode); 605 const long direct_blks = ADDRS_PER_BLOCK(dn->inode); 606 const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK; 607 unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode); 608 int cur_level = dn->cur_level; 609 int max_level = dn->max_level; 610 pgoff_t base = 0; 611 612 if (!dn->max_level) 613 return pgofs + 1; 614 615 while (max_level-- > cur_level) 616 skipped_unit *= NIDS_PER_BLOCK; 617 618 switch (dn->max_level) { 619 case 3: 620 base += 2 * indirect_blks; 621 /* fall through */ 622 case 2: 623 base += 2 * direct_blks; 624 /* fall through */ 625 case 1: 626 base += direct_index; 627 break; 628 default: 629 f2fs_bug_on(F2FS_I_SB(dn->inode), 1); 630 } 631 632 return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base; 633 } 634 635 /* 636 * The maximum depth is four. 637 * Offset[0] will have raw inode offset. 638 */ 639 static int get_node_path(struct inode *inode, long block, 640 int offset[4], unsigned int noffset[4]) 641 { 642 const long direct_index = ADDRS_PER_INODE(inode); 643 const long direct_blks = ADDRS_PER_BLOCK(inode); 644 const long dptrs_per_blk = NIDS_PER_BLOCK; 645 const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK; 646 const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK; 647 int n = 0; 648 int level = 0; 649 650 noffset[0] = 0; 651 652 if (block < direct_index) { 653 offset[n] = block; 654 goto got; 655 } 656 block -= direct_index; 657 if (block < direct_blks) { 658 offset[n++] = NODE_DIR1_BLOCK; 659 noffset[n] = 1; 660 offset[n] = block; 661 level = 1; 662 goto got; 663 } 664 block -= direct_blks; 665 if (block < direct_blks) { 666 offset[n++] = NODE_DIR2_BLOCK; 667 noffset[n] = 2; 668 offset[n] = block; 669 level = 1; 670 goto got; 671 } 672 block -= direct_blks; 673 if (block < indirect_blks) { 674 offset[n++] = NODE_IND1_BLOCK; 675 noffset[n] = 3; 676 offset[n++] = block / direct_blks; 677 noffset[n] = 4 + offset[n - 1]; 678 offset[n] = block % direct_blks; 679 level = 2; 680 goto got; 681 } 682 block -= indirect_blks; 683 if (block < indirect_blks) { 684 offset[n++] = NODE_IND2_BLOCK; 685 noffset[n] = 4 + dptrs_per_blk; 686 offset[n++] = block / direct_blks; 687 noffset[n] = 5 + dptrs_per_blk + offset[n - 1]; 688 offset[n] = block % direct_blks; 689 level = 2; 690 goto got; 691 } 692 block -= indirect_blks; 693 if (block < dindirect_blks) { 694 offset[n++] = NODE_DIND_BLOCK; 695 noffset[n] = 5 + (dptrs_per_blk * 2); 696 offset[n++] = block / indirect_blks; 697 noffset[n] = 6 + (dptrs_per_blk * 2) + 698 offset[n - 1] * (dptrs_per_blk + 1); 699 offset[n++] = (block / direct_blks) % dptrs_per_blk; 700 noffset[n] = 7 + (dptrs_per_blk * 2) + 701 offset[n - 2] * (dptrs_per_blk + 1) + 702 offset[n - 1]; 703 offset[n] = block % direct_blks; 704 level = 3; 705 goto got; 706 } else { 707 return -E2BIG; 708 } 709 got: 710 return level; 711 } 712 713 /* 714 * Caller should call f2fs_put_dnode(dn). 715 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and 716 * f2fs_unlock_op() only if mode is set with ALLOC_NODE. 717 */ 718 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) 719 { 720 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 721 struct page *npage[4]; 722 struct page *parent = NULL; 723 int offset[4]; 724 unsigned int noffset[4]; 725 nid_t nids[4]; 726 int level, i = 0; 727 int err = 0; 728 729 level = get_node_path(dn->inode, index, offset, noffset); 730 if (level < 0) 731 return level; 732 733 nids[0] = dn->inode->i_ino; 734 npage[0] = dn->inode_page; 735 736 if (!npage[0]) { 737 npage[0] = f2fs_get_node_page(sbi, nids[0]); 738 if (IS_ERR(npage[0])) 739 return PTR_ERR(npage[0]); 740 } 741 742 /* if inline_data is set, should not report any block indices */ 743 if (f2fs_has_inline_data(dn->inode) && index) { 744 err = -ENOENT; 745 f2fs_put_page(npage[0], 1); 746 goto release_out; 747 } 748 749 parent = npage[0]; 750 if (level != 0) 751 nids[1] = get_nid(parent, offset[0], true); 752 dn->inode_page = npage[0]; 753 dn->inode_page_locked = true; 754 755 /* get indirect or direct nodes */ 756 for (i = 1; i <= level; i++) { 757 bool done = false; 758 759 if (!nids[i] && mode == ALLOC_NODE) { 760 /* alloc new node */ 761 if (!f2fs_alloc_nid(sbi, &(nids[i]))) { 762 err = -ENOSPC; 763 goto release_pages; 764 } 765 766 dn->nid = nids[i]; 767 npage[i] = f2fs_new_node_page(dn, noffset[i]); 768 if (IS_ERR(npage[i])) { 769 f2fs_alloc_nid_failed(sbi, nids[i]); 770 err = PTR_ERR(npage[i]); 771 goto release_pages; 772 } 773 774 set_nid(parent, offset[i - 1], nids[i], i == 1); 775 f2fs_alloc_nid_done(sbi, nids[i]); 776 done = true; 777 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) { 778 npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]); 779 if (IS_ERR(npage[i])) { 780 err = PTR_ERR(npage[i]); 781 goto release_pages; 782 } 783 done = true; 784 } 785 if (i == 1) { 786 dn->inode_page_locked = false; 787 unlock_page(parent); 788 } else { 789 f2fs_put_page(parent, 1); 790 } 791 792 if (!done) { 793 npage[i] = f2fs_get_node_page(sbi, nids[i]); 794 if (IS_ERR(npage[i])) { 795 err = PTR_ERR(npage[i]); 796 f2fs_put_page(npage[0], 0); 797 goto release_out; 798 } 799 } 800 if (i < level) { 801 parent = npage[i]; 802 nids[i + 1] = get_nid(parent, offset[i], false); 803 } 804 } 805 dn->nid = nids[level]; 806 dn->ofs_in_node = offset[level]; 807 dn->node_page = npage[level]; 808 dn->data_blkaddr = f2fs_data_blkaddr(dn); 809 return 0; 810 811 release_pages: 812 f2fs_put_page(parent, 1); 813 if (i > 1) 814 f2fs_put_page(npage[0], 0); 815 release_out: 816 dn->inode_page = NULL; 817 dn->node_page = NULL; 818 if (err == -ENOENT) { 819 dn->cur_level = i; 820 dn->max_level = level; 821 dn->ofs_in_node = offset[level]; 822 } 823 return err; 824 } 825 826 static int truncate_node(struct dnode_of_data *dn) 827 { 828 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 829 struct node_info ni; 830 int err; 831 pgoff_t index; 832 833 err = f2fs_get_node_info(sbi, dn->nid, &ni); 834 if (err) 835 return err; 836 837 /* Deallocate node address */ 838 f2fs_invalidate_blocks(sbi, ni.blk_addr); 839 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino); 840 set_node_addr(sbi, &ni, NULL_ADDR, false); 841 842 if (dn->nid == dn->inode->i_ino) { 843 f2fs_remove_orphan_inode(sbi, dn->nid); 844 dec_valid_inode_count(sbi); 845 f2fs_inode_synced(dn->inode); 846 } 847 848 clear_node_page_dirty(dn->node_page); 849 set_sbi_flag(sbi, SBI_IS_DIRTY); 850 851 index = dn->node_page->index; 852 f2fs_put_page(dn->node_page, 1); 853 854 invalidate_mapping_pages(NODE_MAPPING(sbi), 855 index, index); 856 857 dn->node_page = NULL; 858 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); 859 860 return 0; 861 } 862 863 static int truncate_dnode(struct dnode_of_data *dn) 864 { 865 struct page *page; 866 int err; 867 868 if (dn->nid == 0) 869 return 1; 870 871 /* get direct node */ 872 page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid); 873 if (PTR_ERR(page) == -ENOENT) 874 return 1; 875 else if (IS_ERR(page)) 876 return PTR_ERR(page); 877 878 /* Make dnode_of_data for parameter */ 879 dn->node_page = page; 880 dn->ofs_in_node = 0; 881 f2fs_truncate_data_blocks(dn); 882 err = truncate_node(dn); 883 if (err) 884 return err; 885 886 return 1; 887 } 888 889 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs, 890 int ofs, int depth) 891 { 892 struct dnode_of_data rdn = *dn; 893 struct page *page; 894 struct f2fs_node *rn; 895 nid_t child_nid; 896 unsigned int child_nofs; 897 int freed = 0; 898 int i, ret; 899 900 if (dn->nid == 0) 901 return NIDS_PER_BLOCK + 1; 902 903 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); 904 905 page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid); 906 if (IS_ERR(page)) { 907 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page)); 908 return PTR_ERR(page); 909 } 910 911 f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK); 912 913 rn = F2FS_NODE(page); 914 if (depth < 3) { 915 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) { 916 child_nid = le32_to_cpu(rn->in.nid[i]); 917 if (child_nid == 0) 918 continue; 919 rdn.nid = child_nid; 920 ret = truncate_dnode(&rdn); 921 if (ret < 0) 922 goto out_err; 923 if (set_nid(page, i, 0, false)) 924 dn->node_changed = true; 925 } 926 } else { 927 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1; 928 for (i = ofs; i < NIDS_PER_BLOCK; i++) { 929 child_nid = le32_to_cpu(rn->in.nid[i]); 930 if (child_nid == 0) { 931 child_nofs += NIDS_PER_BLOCK + 1; 932 continue; 933 } 934 rdn.nid = child_nid; 935 ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1); 936 if (ret == (NIDS_PER_BLOCK + 1)) { 937 if (set_nid(page, i, 0, false)) 938 dn->node_changed = true; 939 child_nofs += ret; 940 } else if (ret < 0 && ret != -ENOENT) { 941 goto out_err; 942 } 943 } 944 freed = child_nofs; 945 } 946 947 if (!ofs) { 948 /* remove current indirect node */ 949 dn->node_page = page; 950 ret = truncate_node(dn); 951 if (ret) 952 goto out_err; 953 freed++; 954 } else { 955 f2fs_put_page(page, 1); 956 } 957 trace_f2fs_truncate_nodes_exit(dn->inode, freed); 958 return freed; 959 960 out_err: 961 f2fs_put_page(page, 1); 962 trace_f2fs_truncate_nodes_exit(dn->inode, ret); 963 return ret; 964 } 965 966 static int truncate_partial_nodes(struct dnode_of_data *dn, 967 struct f2fs_inode *ri, int *offset, int depth) 968 { 969 struct page *pages[2]; 970 nid_t nid[3]; 971 nid_t child_nid; 972 int err = 0; 973 int i; 974 int idx = depth - 2; 975 976 nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); 977 if (!nid[0]) 978 return 0; 979 980 /* get indirect nodes in the path */ 981 for (i = 0; i < idx + 1; i++) { 982 /* reference count'll be increased */ 983 pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]); 984 if (IS_ERR(pages[i])) { 985 err = PTR_ERR(pages[i]); 986 idx = i - 1; 987 goto fail; 988 } 989 nid[i + 1] = get_nid(pages[i], offset[i + 1], false); 990 } 991 992 f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK); 993 994 /* free direct nodes linked to a partial indirect node */ 995 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { 996 child_nid = get_nid(pages[idx], i, false); 997 if (!child_nid) 998 continue; 999 dn->nid = child_nid; 1000 err = truncate_dnode(dn); 1001 if (err < 0) 1002 goto fail; 1003 if (set_nid(pages[idx], i, 0, false)) 1004 dn->node_changed = true; 1005 } 1006 1007 if (offset[idx + 1] == 0) { 1008 dn->node_page = pages[idx]; 1009 dn->nid = nid[idx]; 1010 err = truncate_node(dn); 1011 if (err) 1012 goto fail; 1013 } else { 1014 f2fs_put_page(pages[idx], 1); 1015 } 1016 offset[idx]++; 1017 offset[idx + 1] = 0; 1018 idx--; 1019 fail: 1020 for (i = idx; i >= 0; i--) 1021 f2fs_put_page(pages[i], 1); 1022 1023 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); 1024 1025 return err; 1026 } 1027 1028 /* 1029 * All the block addresses of data and nodes should be nullified. 1030 */ 1031 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from) 1032 { 1033 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1034 int err = 0, cont = 1; 1035 int level, offset[4], noffset[4]; 1036 unsigned int nofs = 0; 1037 struct f2fs_inode *ri; 1038 struct dnode_of_data dn; 1039 struct page *page; 1040 1041 trace_f2fs_truncate_inode_blocks_enter(inode, from); 1042 1043 level = get_node_path(inode, from, offset, noffset); 1044 if (level < 0) 1045 return level; 1046 1047 page = f2fs_get_node_page(sbi, inode->i_ino); 1048 if (IS_ERR(page)) { 1049 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); 1050 return PTR_ERR(page); 1051 } 1052 1053 set_new_dnode(&dn, inode, page, NULL, 0); 1054 unlock_page(page); 1055 1056 ri = F2FS_INODE(page); 1057 switch (level) { 1058 case 0: 1059 case 1: 1060 nofs = noffset[1]; 1061 break; 1062 case 2: 1063 nofs = noffset[1]; 1064 if (!offset[level - 1]) 1065 goto skip_partial; 1066 err = truncate_partial_nodes(&dn, ri, offset, level); 1067 if (err < 0 && err != -ENOENT) 1068 goto fail; 1069 nofs += 1 + NIDS_PER_BLOCK; 1070 break; 1071 case 3: 1072 nofs = 5 + 2 * NIDS_PER_BLOCK; 1073 if (!offset[level - 1]) 1074 goto skip_partial; 1075 err = truncate_partial_nodes(&dn, ri, offset, level); 1076 if (err < 0 && err != -ENOENT) 1077 goto fail; 1078 break; 1079 default: 1080 BUG(); 1081 } 1082 1083 skip_partial: 1084 while (cont) { 1085 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); 1086 switch (offset[0]) { 1087 case NODE_DIR1_BLOCK: 1088 case NODE_DIR2_BLOCK: 1089 err = truncate_dnode(&dn); 1090 break; 1091 1092 case NODE_IND1_BLOCK: 1093 case NODE_IND2_BLOCK: 1094 err = truncate_nodes(&dn, nofs, offset[1], 2); 1095 break; 1096 1097 case NODE_DIND_BLOCK: 1098 err = truncate_nodes(&dn, nofs, offset[1], 3); 1099 cont = 0; 1100 break; 1101 1102 default: 1103 BUG(); 1104 } 1105 if (err < 0 && err != -ENOENT) 1106 goto fail; 1107 if (offset[1] == 0 && 1108 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { 1109 lock_page(page); 1110 BUG_ON(page->mapping != NODE_MAPPING(sbi)); 1111 f2fs_wait_on_page_writeback(page, NODE, true, true); 1112 ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; 1113 set_page_dirty(page); 1114 unlock_page(page); 1115 } 1116 offset[1] = 0; 1117 offset[0]++; 1118 nofs += err; 1119 } 1120 fail: 1121 f2fs_put_page(page, 0); 1122 trace_f2fs_truncate_inode_blocks_exit(inode, err); 1123 return err > 0 ? 0 : err; 1124 } 1125 1126 /* caller must lock inode page */ 1127 int f2fs_truncate_xattr_node(struct inode *inode) 1128 { 1129 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1130 nid_t nid = F2FS_I(inode)->i_xattr_nid; 1131 struct dnode_of_data dn; 1132 struct page *npage; 1133 int err; 1134 1135 if (!nid) 1136 return 0; 1137 1138 npage = f2fs_get_node_page(sbi, nid); 1139 if (IS_ERR(npage)) 1140 return PTR_ERR(npage); 1141 1142 set_new_dnode(&dn, inode, NULL, npage, nid); 1143 err = truncate_node(&dn); 1144 if (err) { 1145 f2fs_put_page(npage, 1); 1146 return err; 1147 } 1148 1149 f2fs_i_xnid_write(inode, 0); 1150 1151 return 0; 1152 } 1153 1154 /* 1155 * Caller should grab and release a rwsem by calling f2fs_lock_op() and 1156 * f2fs_unlock_op(). 1157 */ 1158 int f2fs_remove_inode_page(struct inode *inode) 1159 { 1160 struct dnode_of_data dn; 1161 int err; 1162 1163 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); 1164 err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE); 1165 if (err) 1166 return err; 1167 1168 err = f2fs_truncate_xattr_node(inode); 1169 if (err) { 1170 f2fs_put_dnode(&dn); 1171 return err; 1172 } 1173 1174 /* remove potential inline_data blocks */ 1175 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1176 S_ISLNK(inode->i_mode)) 1177 f2fs_truncate_data_blocks_range(&dn, 1); 1178 1179 /* 0 is possible, after f2fs_new_inode() has failed */ 1180 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { 1181 f2fs_put_dnode(&dn); 1182 return -EIO; 1183 } 1184 1185 if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) { 1186 f2fs_warn(F2FS_I_SB(inode), 1187 "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu", 1188 inode->i_ino, (unsigned long long)inode->i_blocks); 1189 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); 1190 } 1191 1192 /* will put inode & node pages */ 1193 err = truncate_node(&dn); 1194 if (err) { 1195 f2fs_put_dnode(&dn); 1196 return err; 1197 } 1198 return 0; 1199 } 1200 1201 struct page *f2fs_new_inode_page(struct inode *inode) 1202 { 1203 struct dnode_of_data dn; 1204 1205 /* allocate inode page for new inode */ 1206 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); 1207 1208 /* caller should f2fs_put_page(page, 1); */ 1209 return f2fs_new_node_page(&dn, 0); 1210 } 1211 1212 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs) 1213 { 1214 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 1215 struct node_info new_ni; 1216 struct page *page; 1217 int err; 1218 1219 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) 1220 return ERR_PTR(-EPERM); 1221 1222 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false); 1223 if (!page) 1224 return ERR_PTR(-ENOMEM); 1225 1226 if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs)))) 1227 goto fail; 1228 1229 #ifdef CONFIG_F2FS_CHECK_FS 1230 err = f2fs_get_node_info(sbi, dn->nid, &new_ni); 1231 if (err) { 1232 dec_valid_node_count(sbi, dn->inode, !ofs); 1233 goto fail; 1234 } 1235 f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR); 1236 #endif 1237 new_ni.nid = dn->nid; 1238 new_ni.ino = dn->inode->i_ino; 1239 new_ni.blk_addr = NULL_ADDR; 1240 new_ni.flag = 0; 1241 new_ni.version = 0; 1242 set_node_addr(sbi, &new_ni, NEW_ADDR, false); 1243 1244 f2fs_wait_on_page_writeback(page, NODE, true, true); 1245 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); 1246 set_cold_node(page, S_ISDIR(dn->inode->i_mode)); 1247 if (!PageUptodate(page)) 1248 SetPageUptodate(page); 1249 if (set_page_dirty(page)) 1250 dn->node_changed = true; 1251 1252 if (f2fs_has_xattr_block(ofs)) 1253 f2fs_i_xnid_write(dn->inode, dn->nid); 1254 1255 if (ofs == 0) 1256 inc_valid_inode_count(sbi); 1257 return page; 1258 1259 fail: 1260 clear_node_page_dirty(page); 1261 f2fs_put_page(page, 1); 1262 return ERR_PTR(err); 1263 } 1264 1265 /* 1266 * Caller should do after getting the following values. 1267 * 0: f2fs_put_page(page, 0) 1268 * LOCKED_PAGE or error: f2fs_put_page(page, 1) 1269 */ 1270 static int read_node_page(struct page *page, int op_flags) 1271 { 1272 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 1273 struct node_info ni; 1274 struct f2fs_io_info fio = { 1275 .sbi = sbi, 1276 .type = NODE, 1277 .op = REQ_OP_READ, 1278 .op_flags = op_flags, 1279 .page = page, 1280 .encrypted_page = NULL, 1281 }; 1282 int err; 1283 1284 if (PageUptodate(page)) { 1285 if (!f2fs_inode_chksum_verify(sbi, page)) { 1286 ClearPageUptodate(page); 1287 return -EFSBADCRC; 1288 } 1289 return LOCKED_PAGE; 1290 } 1291 1292 err = f2fs_get_node_info(sbi, page->index, &ni); 1293 if (err) 1294 return err; 1295 1296 if (unlikely(ni.blk_addr == NULL_ADDR) || 1297 is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) { 1298 ClearPageUptodate(page); 1299 return -ENOENT; 1300 } 1301 1302 fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr; 1303 1304 err = f2fs_submit_page_bio(&fio); 1305 1306 if (!err) 1307 f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE); 1308 1309 return err; 1310 } 1311 1312 /* 1313 * Readahead a node page 1314 */ 1315 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) 1316 { 1317 struct page *apage; 1318 int err; 1319 1320 if (!nid) 1321 return; 1322 if (f2fs_check_nid_range(sbi, nid)) 1323 return; 1324 1325 apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid); 1326 if (apage) 1327 return; 1328 1329 apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); 1330 if (!apage) 1331 return; 1332 1333 err = read_node_page(apage, REQ_RAHEAD); 1334 f2fs_put_page(apage, err ? 1 : 0); 1335 } 1336 1337 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, 1338 struct page *parent, int start) 1339 { 1340 struct page *page; 1341 int err; 1342 1343 if (!nid) 1344 return ERR_PTR(-ENOENT); 1345 if (f2fs_check_nid_range(sbi, nid)) 1346 return ERR_PTR(-EINVAL); 1347 repeat: 1348 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); 1349 if (!page) 1350 return ERR_PTR(-ENOMEM); 1351 1352 err = read_node_page(page, 0); 1353 if (err < 0) { 1354 f2fs_put_page(page, 1); 1355 return ERR_PTR(err); 1356 } else if (err == LOCKED_PAGE) { 1357 err = 0; 1358 goto page_hit; 1359 } 1360 1361 if (parent) 1362 f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE); 1363 1364 lock_page(page); 1365 1366 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1367 f2fs_put_page(page, 1); 1368 goto repeat; 1369 } 1370 1371 if (unlikely(!PageUptodate(page))) { 1372 err = -EIO; 1373 goto out_err; 1374 } 1375 1376 if (!f2fs_inode_chksum_verify(sbi, page)) { 1377 err = -EFSBADCRC; 1378 goto out_err; 1379 } 1380 page_hit: 1381 if(unlikely(nid != nid_of_node(page))) { 1382 f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", 1383 nid, nid_of_node(page), ino_of_node(page), 1384 ofs_of_node(page), cpver_of_node(page), 1385 next_blkaddr_of_node(page)); 1386 err = -EINVAL; 1387 out_err: 1388 ClearPageUptodate(page); 1389 f2fs_put_page(page, 1); 1390 return ERR_PTR(err); 1391 } 1392 return page; 1393 } 1394 1395 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) 1396 { 1397 return __get_node_page(sbi, nid, NULL, 0); 1398 } 1399 1400 struct page *f2fs_get_node_page_ra(struct page *parent, int start) 1401 { 1402 struct f2fs_sb_info *sbi = F2FS_P_SB(parent); 1403 nid_t nid = get_nid(parent, start, false); 1404 1405 return __get_node_page(sbi, nid, parent, start); 1406 } 1407 1408 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) 1409 { 1410 struct inode *inode; 1411 struct page *page; 1412 int ret; 1413 1414 /* should flush inline_data before evict_inode */ 1415 inode = ilookup(sbi->sb, ino); 1416 if (!inode) 1417 return; 1418 1419 page = f2fs_pagecache_get_page(inode->i_mapping, 0, 1420 FGP_LOCK|FGP_NOWAIT, 0); 1421 if (!page) 1422 goto iput_out; 1423 1424 if (!PageUptodate(page)) 1425 goto page_out; 1426 1427 if (!PageDirty(page)) 1428 goto page_out; 1429 1430 if (!clear_page_dirty_for_io(page)) 1431 goto page_out; 1432 1433 ret = f2fs_write_inline_data(inode, page); 1434 inode_dec_dirty_pages(inode); 1435 f2fs_remove_dirty_inode(inode); 1436 if (ret) 1437 set_page_dirty(page); 1438 page_out: 1439 f2fs_put_page(page, 1); 1440 iput_out: 1441 iput(inode); 1442 } 1443 1444 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) 1445 { 1446 pgoff_t index; 1447 struct pagevec pvec; 1448 struct page *last_page = NULL; 1449 int nr_pages; 1450 1451 pagevec_init(&pvec); 1452 index = 0; 1453 1454 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1455 PAGECACHE_TAG_DIRTY))) { 1456 int i; 1457 1458 for (i = 0; i < nr_pages; i++) { 1459 struct page *page = pvec.pages[i]; 1460 1461 if (unlikely(f2fs_cp_error(sbi))) { 1462 f2fs_put_page(last_page, 0); 1463 pagevec_release(&pvec); 1464 return ERR_PTR(-EIO); 1465 } 1466 1467 if (!IS_DNODE(page) || !is_cold_node(page)) 1468 continue; 1469 if (ino_of_node(page) != ino) 1470 continue; 1471 1472 lock_page(page); 1473 1474 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1475 continue_unlock: 1476 unlock_page(page); 1477 continue; 1478 } 1479 if (ino_of_node(page) != ino) 1480 goto continue_unlock; 1481 1482 if (!PageDirty(page)) { 1483 /* someone wrote it for us */ 1484 goto continue_unlock; 1485 } 1486 1487 if (last_page) 1488 f2fs_put_page(last_page, 0); 1489 1490 get_page(page); 1491 last_page = page; 1492 unlock_page(page); 1493 } 1494 pagevec_release(&pvec); 1495 cond_resched(); 1496 } 1497 return last_page; 1498 } 1499 1500 static int __write_node_page(struct page *page, bool atomic, bool *submitted, 1501 struct writeback_control *wbc, bool do_balance, 1502 enum iostat_type io_type, unsigned int *seq_id) 1503 { 1504 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 1505 nid_t nid; 1506 struct node_info ni; 1507 struct f2fs_io_info fio = { 1508 .sbi = sbi, 1509 .ino = ino_of_node(page), 1510 .type = NODE, 1511 .op = REQ_OP_WRITE, 1512 .op_flags = wbc_to_write_flags(wbc), 1513 .page = page, 1514 .encrypted_page = NULL, 1515 .submitted = false, 1516 .io_type = io_type, 1517 .io_wbc = wbc, 1518 }; 1519 unsigned int seq; 1520 1521 trace_f2fs_writepage(page, NODE); 1522 1523 if (unlikely(f2fs_cp_error(sbi))) { 1524 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) { 1525 ClearPageUptodate(page); 1526 dec_page_count(sbi, F2FS_DIRTY_NODES); 1527 unlock_page(page); 1528 return 0; 1529 } 1530 goto redirty_out; 1531 } 1532 1533 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 1534 goto redirty_out; 1535 1536 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && 1537 wbc->sync_mode == WB_SYNC_NONE && 1538 IS_DNODE(page) && is_cold_node(page)) 1539 goto redirty_out; 1540 1541 /* get old block addr of this node page */ 1542 nid = nid_of_node(page); 1543 f2fs_bug_on(sbi, page->index != nid); 1544 1545 if (f2fs_get_node_info(sbi, nid, &ni)) 1546 goto redirty_out; 1547 1548 if (wbc->for_reclaim) { 1549 if (!down_read_trylock(&sbi->node_write)) 1550 goto redirty_out; 1551 } else { 1552 down_read(&sbi->node_write); 1553 } 1554 1555 /* This page is already truncated */ 1556 if (unlikely(ni.blk_addr == NULL_ADDR)) { 1557 ClearPageUptodate(page); 1558 dec_page_count(sbi, F2FS_DIRTY_NODES); 1559 up_read(&sbi->node_write); 1560 unlock_page(page); 1561 return 0; 1562 } 1563 1564 if (__is_valid_data_blkaddr(ni.blk_addr) && 1565 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, 1566 DATA_GENERIC_ENHANCE)) { 1567 up_read(&sbi->node_write); 1568 goto redirty_out; 1569 } 1570 1571 if (atomic && !test_opt(sbi, NOBARRIER)) 1572 fio.op_flags |= REQ_PREFLUSH | REQ_FUA; 1573 1574 /* should add to global list before clearing PAGECACHE status */ 1575 if (f2fs_in_warm_node_list(sbi, page)) { 1576 seq = f2fs_add_fsync_node_entry(sbi, page); 1577 if (seq_id) 1578 *seq_id = seq; 1579 } 1580 1581 set_page_writeback(page); 1582 ClearPageError(page); 1583 1584 fio.old_blkaddr = ni.blk_addr; 1585 f2fs_do_write_node_page(nid, &fio); 1586 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); 1587 dec_page_count(sbi, F2FS_DIRTY_NODES); 1588 up_read(&sbi->node_write); 1589 1590 if (wbc->for_reclaim) { 1591 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE); 1592 submitted = NULL; 1593 } 1594 1595 unlock_page(page); 1596 1597 if (unlikely(f2fs_cp_error(sbi))) { 1598 f2fs_submit_merged_write(sbi, NODE); 1599 submitted = NULL; 1600 } 1601 if (submitted) 1602 *submitted = fio.submitted; 1603 1604 if (do_balance) 1605 f2fs_balance_fs(sbi, false); 1606 return 0; 1607 1608 redirty_out: 1609 redirty_page_for_writepage(wbc, page); 1610 return AOP_WRITEPAGE_ACTIVATE; 1611 } 1612 1613 int f2fs_move_node_page(struct page *node_page, int gc_type) 1614 { 1615 int err = 0; 1616 1617 if (gc_type == FG_GC) { 1618 struct writeback_control wbc = { 1619 .sync_mode = WB_SYNC_ALL, 1620 .nr_to_write = 1, 1621 .for_reclaim = 0, 1622 }; 1623 1624 f2fs_wait_on_page_writeback(node_page, NODE, true, true); 1625 1626 set_page_dirty(node_page); 1627 1628 if (!clear_page_dirty_for_io(node_page)) { 1629 err = -EAGAIN; 1630 goto out_page; 1631 } 1632 1633 if (__write_node_page(node_page, false, NULL, 1634 &wbc, false, FS_GC_NODE_IO, NULL)) { 1635 err = -EAGAIN; 1636 unlock_page(node_page); 1637 } 1638 goto release_page; 1639 } else { 1640 /* set page dirty and write it */ 1641 if (!PageWriteback(node_page)) 1642 set_page_dirty(node_page); 1643 } 1644 out_page: 1645 unlock_page(node_page); 1646 release_page: 1647 f2fs_put_page(node_page, 0); 1648 return err; 1649 } 1650 1651 static int f2fs_write_node_page(struct page *page, 1652 struct writeback_control *wbc) 1653 { 1654 return __write_node_page(page, false, NULL, wbc, false, 1655 FS_NODE_IO, NULL); 1656 } 1657 1658 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 1659 struct writeback_control *wbc, bool atomic, 1660 unsigned int *seq_id) 1661 { 1662 pgoff_t index; 1663 struct pagevec pvec; 1664 int ret = 0; 1665 struct page *last_page = NULL; 1666 bool marked = false; 1667 nid_t ino = inode->i_ino; 1668 int nr_pages; 1669 int nwritten = 0; 1670 1671 if (atomic) { 1672 last_page = last_fsync_dnode(sbi, ino); 1673 if (IS_ERR_OR_NULL(last_page)) 1674 return PTR_ERR_OR_ZERO(last_page); 1675 } 1676 retry: 1677 pagevec_init(&pvec); 1678 index = 0; 1679 1680 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1681 PAGECACHE_TAG_DIRTY))) { 1682 int i; 1683 1684 for (i = 0; i < nr_pages; i++) { 1685 struct page *page = pvec.pages[i]; 1686 bool submitted = false; 1687 1688 if (unlikely(f2fs_cp_error(sbi))) { 1689 f2fs_put_page(last_page, 0); 1690 pagevec_release(&pvec); 1691 ret = -EIO; 1692 goto out; 1693 } 1694 1695 if (!IS_DNODE(page) || !is_cold_node(page)) 1696 continue; 1697 if (ino_of_node(page) != ino) 1698 continue; 1699 1700 lock_page(page); 1701 1702 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1703 continue_unlock: 1704 unlock_page(page); 1705 continue; 1706 } 1707 if (ino_of_node(page) != ino) 1708 goto continue_unlock; 1709 1710 if (!PageDirty(page) && page != last_page) { 1711 /* someone wrote it for us */ 1712 goto continue_unlock; 1713 } 1714 1715 f2fs_wait_on_page_writeback(page, NODE, true, true); 1716 1717 set_fsync_mark(page, 0); 1718 set_dentry_mark(page, 0); 1719 1720 if (!atomic || page == last_page) { 1721 set_fsync_mark(page, 1); 1722 if (IS_INODE(page)) { 1723 if (is_inode_flag_set(inode, 1724 FI_DIRTY_INODE)) 1725 f2fs_update_inode(inode, page); 1726 set_dentry_mark(page, 1727 f2fs_need_dentry_mark(sbi, ino)); 1728 } 1729 /* may be written by other thread */ 1730 if (!PageDirty(page)) 1731 set_page_dirty(page); 1732 } 1733 1734 if (!clear_page_dirty_for_io(page)) 1735 goto continue_unlock; 1736 1737 ret = __write_node_page(page, atomic && 1738 page == last_page, 1739 &submitted, wbc, true, 1740 FS_NODE_IO, seq_id); 1741 if (ret) { 1742 unlock_page(page); 1743 f2fs_put_page(last_page, 0); 1744 break; 1745 } else if (submitted) { 1746 nwritten++; 1747 } 1748 1749 if (page == last_page) { 1750 f2fs_put_page(page, 0); 1751 marked = true; 1752 break; 1753 } 1754 } 1755 pagevec_release(&pvec); 1756 cond_resched(); 1757 1758 if (ret || marked) 1759 break; 1760 } 1761 if (!ret && atomic && !marked) { 1762 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx", 1763 ino, last_page->index); 1764 lock_page(last_page); 1765 f2fs_wait_on_page_writeback(last_page, NODE, true, true); 1766 set_page_dirty(last_page); 1767 unlock_page(last_page); 1768 goto retry; 1769 } 1770 out: 1771 if (nwritten) 1772 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE); 1773 return ret ? -EIO: 0; 1774 } 1775 1776 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data) 1777 { 1778 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1779 bool clean; 1780 1781 if (inode->i_ino != ino) 1782 return 0; 1783 1784 if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) 1785 return 0; 1786 1787 spin_lock(&sbi->inode_lock[DIRTY_META]); 1788 clean = list_empty(&F2FS_I(inode)->gdirty_list); 1789 spin_unlock(&sbi->inode_lock[DIRTY_META]); 1790 1791 if (clean) 1792 return 0; 1793 1794 inode = igrab(inode); 1795 if (!inode) 1796 return 0; 1797 return 1; 1798 } 1799 1800 static bool flush_dirty_inode(struct page *page) 1801 { 1802 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 1803 struct inode *inode; 1804 nid_t ino = ino_of_node(page); 1805 1806 inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL); 1807 if (!inode) 1808 return false; 1809 1810 f2fs_update_inode(inode, page); 1811 unlock_page(page); 1812 1813 iput(inode); 1814 return true; 1815 } 1816 1817 int f2fs_flush_inline_data(struct f2fs_sb_info *sbi) 1818 { 1819 pgoff_t index = 0; 1820 struct pagevec pvec; 1821 int nr_pages; 1822 int ret = 0; 1823 1824 pagevec_init(&pvec); 1825 1826 while ((nr_pages = pagevec_lookup_tag(&pvec, 1827 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) { 1828 int i; 1829 1830 for (i = 0; i < nr_pages; i++) { 1831 struct page *page = pvec.pages[i]; 1832 1833 if (!IS_DNODE(page)) 1834 continue; 1835 1836 lock_page(page); 1837 1838 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1839 continue_unlock: 1840 unlock_page(page); 1841 continue; 1842 } 1843 1844 if (!PageDirty(page)) { 1845 /* someone wrote it for us */ 1846 goto continue_unlock; 1847 } 1848 1849 /* flush inline_data, if it's async context. */ 1850 if (is_inline_node(page)) { 1851 clear_inline_node(page); 1852 unlock_page(page); 1853 flush_inline_data(sbi, ino_of_node(page)); 1854 continue; 1855 } 1856 unlock_page(page); 1857 } 1858 pagevec_release(&pvec); 1859 cond_resched(); 1860 } 1861 return ret; 1862 } 1863 1864 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 1865 struct writeback_control *wbc, 1866 bool do_balance, enum iostat_type io_type) 1867 { 1868 pgoff_t index; 1869 struct pagevec pvec; 1870 int step = 0; 1871 int nwritten = 0; 1872 int ret = 0; 1873 int nr_pages, done = 0; 1874 1875 pagevec_init(&pvec); 1876 1877 next_step: 1878 index = 0; 1879 1880 while (!done && (nr_pages = pagevec_lookup_tag(&pvec, 1881 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) { 1882 int i; 1883 1884 for (i = 0; i < nr_pages; i++) { 1885 struct page *page = pvec.pages[i]; 1886 bool submitted = false; 1887 bool may_dirty = true; 1888 1889 /* give a priority to WB_SYNC threads */ 1890 if (atomic_read(&sbi->wb_sync_req[NODE]) && 1891 wbc->sync_mode == WB_SYNC_NONE) { 1892 done = 1; 1893 break; 1894 } 1895 1896 /* 1897 * flushing sequence with step: 1898 * 0. indirect nodes 1899 * 1. dentry dnodes 1900 * 2. file dnodes 1901 */ 1902 if (step == 0 && IS_DNODE(page)) 1903 continue; 1904 if (step == 1 && (!IS_DNODE(page) || 1905 is_cold_node(page))) 1906 continue; 1907 if (step == 2 && (!IS_DNODE(page) || 1908 !is_cold_node(page))) 1909 continue; 1910 lock_node: 1911 if (wbc->sync_mode == WB_SYNC_ALL) 1912 lock_page(page); 1913 else if (!trylock_page(page)) 1914 continue; 1915 1916 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1917 continue_unlock: 1918 unlock_page(page); 1919 continue; 1920 } 1921 1922 if (!PageDirty(page)) { 1923 /* someone wrote it for us */ 1924 goto continue_unlock; 1925 } 1926 1927 /* flush inline_data, if it's async context. */ 1928 if (do_balance && is_inline_node(page)) { 1929 clear_inline_node(page); 1930 unlock_page(page); 1931 flush_inline_data(sbi, ino_of_node(page)); 1932 goto lock_node; 1933 } 1934 1935 /* flush dirty inode */ 1936 if (IS_INODE(page) && may_dirty) { 1937 may_dirty = false; 1938 if (flush_dirty_inode(page)) 1939 goto lock_node; 1940 } 1941 1942 f2fs_wait_on_page_writeback(page, NODE, true, true); 1943 1944 if (!clear_page_dirty_for_io(page)) 1945 goto continue_unlock; 1946 1947 set_fsync_mark(page, 0); 1948 set_dentry_mark(page, 0); 1949 1950 ret = __write_node_page(page, false, &submitted, 1951 wbc, do_balance, io_type, NULL); 1952 if (ret) 1953 unlock_page(page); 1954 else if (submitted) 1955 nwritten++; 1956 1957 if (--wbc->nr_to_write == 0) 1958 break; 1959 } 1960 pagevec_release(&pvec); 1961 cond_resched(); 1962 1963 if (wbc->nr_to_write == 0) { 1964 step = 2; 1965 break; 1966 } 1967 } 1968 1969 if (step < 2) { 1970 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && 1971 wbc->sync_mode == WB_SYNC_NONE && step == 1) 1972 goto out; 1973 step++; 1974 goto next_step; 1975 } 1976 out: 1977 if (nwritten) 1978 f2fs_submit_merged_write(sbi, NODE); 1979 1980 if (unlikely(f2fs_cp_error(sbi))) 1981 return -EIO; 1982 return ret; 1983 } 1984 1985 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 1986 unsigned int seq_id) 1987 { 1988 struct fsync_node_entry *fn; 1989 struct page *page; 1990 struct list_head *head = &sbi->fsync_node_list; 1991 unsigned long flags; 1992 unsigned int cur_seq_id = 0; 1993 int ret2, ret = 0; 1994 1995 while (seq_id && cur_seq_id < seq_id) { 1996 spin_lock_irqsave(&sbi->fsync_node_lock, flags); 1997 if (list_empty(head)) { 1998 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); 1999 break; 2000 } 2001 fn = list_first_entry(head, struct fsync_node_entry, list); 2002 if (fn->seq_id > seq_id) { 2003 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); 2004 break; 2005 } 2006 cur_seq_id = fn->seq_id; 2007 page = fn->page; 2008 get_page(page); 2009 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); 2010 2011 f2fs_wait_on_page_writeback(page, NODE, true, false); 2012 if (TestClearPageError(page)) 2013 ret = -EIO; 2014 2015 put_page(page); 2016 2017 if (ret) 2018 break; 2019 } 2020 2021 ret2 = filemap_check_errors(NODE_MAPPING(sbi)); 2022 if (!ret) 2023 ret = ret2; 2024 2025 return ret; 2026 } 2027 2028 static int f2fs_write_node_pages(struct address_space *mapping, 2029 struct writeback_control *wbc) 2030 { 2031 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); 2032 struct blk_plug plug; 2033 long diff; 2034 2035 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 2036 goto skip_write; 2037 2038 /* balancing f2fs's metadata in background */ 2039 f2fs_balance_fs_bg(sbi, true); 2040 2041 /* collect a number of dirty node pages and write together */ 2042 if (wbc->sync_mode != WB_SYNC_ALL && 2043 get_pages(sbi, F2FS_DIRTY_NODES) < 2044 nr_pages_to_skip(sbi, NODE)) 2045 goto skip_write; 2046 2047 if (wbc->sync_mode == WB_SYNC_ALL) 2048 atomic_inc(&sbi->wb_sync_req[NODE]); 2049 else if (atomic_read(&sbi->wb_sync_req[NODE])) 2050 goto skip_write; 2051 2052 trace_f2fs_writepages(mapping->host, wbc, NODE); 2053 2054 diff = nr_pages_to_write(sbi, NODE, wbc); 2055 blk_start_plug(&plug); 2056 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO); 2057 blk_finish_plug(&plug); 2058 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); 2059 2060 if (wbc->sync_mode == WB_SYNC_ALL) 2061 atomic_dec(&sbi->wb_sync_req[NODE]); 2062 return 0; 2063 2064 skip_write: 2065 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); 2066 trace_f2fs_writepages(mapping->host, wbc, NODE); 2067 return 0; 2068 } 2069 2070 static int f2fs_set_node_page_dirty(struct page *page) 2071 { 2072 trace_f2fs_set_page_dirty(page, NODE); 2073 2074 if (!PageUptodate(page)) 2075 SetPageUptodate(page); 2076 #ifdef CONFIG_F2FS_CHECK_FS 2077 if (IS_INODE(page)) 2078 f2fs_inode_chksum_set(F2FS_P_SB(page), page); 2079 #endif 2080 if (!PageDirty(page)) { 2081 __set_page_dirty_nobuffers(page); 2082 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); 2083 f2fs_set_page_private(page, 0); 2084 f2fs_trace_pid(page); 2085 return 1; 2086 } 2087 return 0; 2088 } 2089 2090 /* 2091 * Structure of the f2fs node operations 2092 */ 2093 const struct address_space_operations f2fs_node_aops = { 2094 .writepage = f2fs_write_node_page, 2095 .writepages = f2fs_write_node_pages, 2096 .set_page_dirty = f2fs_set_node_page_dirty, 2097 .invalidatepage = f2fs_invalidate_page, 2098 .releasepage = f2fs_release_page, 2099 #ifdef CONFIG_MIGRATION 2100 .migratepage = f2fs_migrate_page, 2101 #endif 2102 }; 2103 2104 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, 2105 nid_t n) 2106 { 2107 return radix_tree_lookup(&nm_i->free_nid_root, n); 2108 } 2109 2110 static int __insert_free_nid(struct f2fs_sb_info *sbi, 2111 struct free_nid *i, enum nid_state state) 2112 { 2113 struct f2fs_nm_info *nm_i = NM_I(sbi); 2114 2115 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); 2116 if (err) 2117 return err; 2118 2119 f2fs_bug_on(sbi, state != i->state); 2120 nm_i->nid_cnt[state]++; 2121 if (state == FREE_NID) 2122 list_add_tail(&i->list, &nm_i->free_nid_list); 2123 return 0; 2124 } 2125 2126 static void __remove_free_nid(struct f2fs_sb_info *sbi, 2127 struct free_nid *i, enum nid_state state) 2128 { 2129 struct f2fs_nm_info *nm_i = NM_I(sbi); 2130 2131 f2fs_bug_on(sbi, state != i->state); 2132 nm_i->nid_cnt[state]--; 2133 if (state == FREE_NID) 2134 list_del(&i->list); 2135 radix_tree_delete(&nm_i->free_nid_root, i->nid); 2136 } 2137 2138 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, 2139 enum nid_state org_state, enum nid_state dst_state) 2140 { 2141 struct f2fs_nm_info *nm_i = NM_I(sbi); 2142 2143 f2fs_bug_on(sbi, org_state != i->state); 2144 i->state = dst_state; 2145 nm_i->nid_cnt[org_state]--; 2146 nm_i->nid_cnt[dst_state]++; 2147 2148 switch (dst_state) { 2149 case PREALLOC_NID: 2150 list_del(&i->list); 2151 break; 2152 case FREE_NID: 2153 list_add_tail(&i->list, &nm_i->free_nid_list); 2154 break; 2155 default: 2156 BUG_ON(1); 2157 } 2158 } 2159 2160 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, 2161 bool set, bool build) 2162 { 2163 struct f2fs_nm_info *nm_i = NM_I(sbi); 2164 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); 2165 unsigned int nid_ofs = nid - START_NID(nid); 2166 2167 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) 2168 return; 2169 2170 if (set) { 2171 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) 2172 return; 2173 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 2174 nm_i->free_nid_count[nat_ofs]++; 2175 } else { 2176 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) 2177 return; 2178 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 2179 if (!build) 2180 nm_i->free_nid_count[nat_ofs]--; 2181 } 2182 } 2183 2184 /* return if the nid is recognized as free */ 2185 static bool add_free_nid(struct f2fs_sb_info *sbi, 2186 nid_t nid, bool build, bool update) 2187 { 2188 struct f2fs_nm_info *nm_i = NM_I(sbi); 2189 struct free_nid *i, *e; 2190 struct nat_entry *ne; 2191 int err = -EINVAL; 2192 bool ret = false; 2193 2194 /* 0 nid should not be used */ 2195 if (unlikely(nid == 0)) 2196 return false; 2197 2198 if (unlikely(f2fs_check_nid_range(sbi, nid))) 2199 return false; 2200 2201 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); 2202 i->nid = nid; 2203 i->state = FREE_NID; 2204 2205 radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); 2206 2207 spin_lock(&nm_i->nid_list_lock); 2208 2209 if (build) { 2210 /* 2211 * Thread A Thread B 2212 * - f2fs_create 2213 * - f2fs_new_inode 2214 * - f2fs_alloc_nid 2215 * - __insert_nid_to_list(PREALLOC_NID) 2216 * - f2fs_balance_fs_bg 2217 * - f2fs_build_free_nids 2218 * - __f2fs_build_free_nids 2219 * - scan_nat_page 2220 * - add_free_nid 2221 * - __lookup_nat_cache 2222 * - f2fs_add_link 2223 * - f2fs_init_inode_metadata 2224 * - f2fs_new_inode_page 2225 * - f2fs_new_node_page 2226 * - set_node_addr 2227 * - f2fs_alloc_nid_done 2228 * - __remove_nid_from_list(PREALLOC_NID) 2229 * - __insert_nid_to_list(FREE_NID) 2230 */ 2231 ne = __lookup_nat_cache(nm_i, nid); 2232 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || 2233 nat_get_blkaddr(ne) != NULL_ADDR)) 2234 goto err_out; 2235 2236 e = __lookup_free_nid_list(nm_i, nid); 2237 if (e) { 2238 if (e->state == FREE_NID) 2239 ret = true; 2240 goto err_out; 2241 } 2242 } 2243 ret = true; 2244 err = __insert_free_nid(sbi, i, FREE_NID); 2245 err_out: 2246 if (update) { 2247 update_free_nid_bitmap(sbi, nid, ret, build); 2248 if (!build) 2249 nm_i->available_nids++; 2250 } 2251 spin_unlock(&nm_i->nid_list_lock); 2252 radix_tree_preload_end(); 2253 2254 if (err) 2255 kmem_cache_free(free_nid_slab, i); 2256 return ret; 2257 } 2258 2259 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) 2260 { 2261 struct f2fs_nm_info *nm_i = NM_I(sbi); 2262 struct free_nid *i; 2263 bool need_free = false; 2264 2265 spin_lock(&nm_i->nid_list_lock); 2266 i = __lookup_free_nid_list(nm_i, nid); 2267 if (i && i->state == FREE_NID) { 2268 __remove_free_nid(sbi, i, FREE_NID); 2269 need_free = true; 2270 } 2271 spin_unlock(&nm_i->nid_list_lock); 2272 2273 if (need_free) 2274 kmem_cache_free(free_nid_slab, i); 2275 } 2276 2277 static int scan_nat_page(struct f2fs_sb_info *sbi, 2278 struct page *nat_page, nid_t start_nid) 2279 { 2280 struct f2fs_nm_info *nm_i = NM_I(sbi); 2281 struct f2fs_nat_block *nat_blk = page_address(nat_page); 2282 block_t blk_addr; 2283 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); 2284 int i; 2285 2286 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); 2287 2288 i = start_nid % NAT_ENTRY_PER_BLOCK; 2289 2290 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { 2291 if (unlikely(start_nid >= nm_i->max_nid)) 2292 break; 2293 2294 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); 2295 2296 if (blk_addr == NEW_ADDR) 2297 return -EINVAL; 2298 2299 if (blk_addr == NULL_ADDR) { 2300 add_free_nid(sbi, start_nid, true, true); 2301 } else { 2302 spin_lock(&NM_I(sbi)->nid_list_lock); 2303 update_free_nid_bitmap(sbi, start_nid, false, true); 2304 spin_unlock(&NM_I(sbi)->nid_list_lock); 2305 } 2306 } 2307 2308 return 0; 2309 } 2310 2311 static void scan_curseg_cache(struct f2fs_sb_info *sbi) 2312 { 2313 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2314 struct f2fs_journal *journal = curseg->journal; 2315 int i; 2316 2317 down_read(&curseg->journal_rwsem); 2318 for (i = 0; i < nats_in_cursum(journal); i++) { 2319 block_t addr; 2320 nid_t nid; 2321 2322 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); 2323 nid = le32_to_cpu(nid_in_journal(journal, i)); 2324 if (addr == NULL_ADDR) 2325 add_free_nid(sbi, nid, true, false); 2326 else 2327 remove_free_nid(sbi, nid); 2328 } 2329 up_read(&curseg->journal_rwsem); 2330 } 2331 2332 static void scan_free_nid_bits(struct f2fs_sb_info *sbi) 2333 { 2334 struct f2fs_nm_info *nm_i = NM_I(sbi); 2335 unsigned int i, idx; 2336 nid_t nid; 2337 2338 down_read(&nm_i->nat_tree_lock); 2339 2340 for (i = 0; i < nm_i->nat_blocks; i++) { 2341 if (!test_bit_le(i, nm_i->nat_block_bitmap)) 2342 continue; 2343 if (!nm_i->free_nid_count[i]) 2344 continue; 2345 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { 2346 idx = find_next_bit_le(nm_i->free_nid_bitmap[i], 2347 NAT_ENTRY_PER_BLOCK, idx); 2348 if (idx >= NAT_ENTRY_PER_BLOCK) 2349 break; 2350 2351 nid = i * NAT_ENTRY_PER_BLOCK + idx; 2352 add_free_nid(sbi, nid, true, false); 2353 2354 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS) 2355 goto out; 2356 } 2357 } 2358 out: 2359 scan_curseg_cache(sbi); 2360 2361 up_read(&nm_i->nat_tree_lock); 2362 } 2363 2364 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, 2365 bool sync, bool mount) 2366 { 2367 struct f2fs_nm_info *nm_i = NM_I(sbi); 2368 int i = 0, ret; 2369 nid_t nid = nm_i->next_scan_nid; 2370 2371 if (unlikely(nid >= nm_i->max_nid)) 2372 nid = 0; 2373 2374 /* Enough entries */ 2375 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) 2376 return 0; 2377 2378 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS)) 2379 return 0; 2380 2381 if (!mount) { 2382 /* try to find free nids in free_nid_bitmap */ 2383 scan_free_nid_bits(sbi); 2384 2385 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) 2386 return 0; 2387 } 2388 2389 /* readahead nat pages to be scanned */ 2390 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, 2391 META_NAT, true); 2392 2393 down_read(&nm_i->nat_tree_lock); 2394 2395 while (1) { 2396 if (!test_bit_le(NAT_BLOCK_OFFSET(nid), 2397 nm_i->nat_block_bitmap)) { 2398 struct page *page = get_current_nat_page(sbi, nid); 2399 2400 if (IS_ERR(page)) { 2401 ret = PTR_ERR(page); 2402 } else { 2403 ret = scan_nat_page(sbi, page, nid); 2404 f2fs_put_page(page, 1); 2405 } 2406 2407 if (ret) { 2408 up_read(&nm_i->nat_tree_lock); 2409 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); 2410 return ret; 2411 } 2412 } 2413 2414 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); 2415 if (unlikely(nid >= nm_i->max_nid)) 2416 nid = 0; 2417 2418 if (++i >= FREE_NID_PAGES) 2419 break; 2420 } 2421 2422 /* go to the next free nat pages to find free nids abundantly */ 2423 nm_i->next_scan_nid = nid; 2424 2425 /* find free nids from current sum_pages */ 2426 scan_curseg_cache(sbi); 2427 2428 up_read(&nm_i->nat_tree_lock); 2429 2430 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), 2431 nm_i->ra_nid_pages, META_NAT, false); 2432 2433 return 0; 2434 } 2435 2436 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) 2437 { 2438 int ret; 2439 2440 mutex_lock(&NM_I(sbi)->build_lock); 2441 ret = __f2fs_build_free_nids(sbi, sync, mount); 2442 mutex_unlock(&NM_I(sbi)->build_lock); 2443 2444 return ret; 2445 } 2446 2447 /* 2448 * If this function returns success, caller can obtain a new nid 2449 * from second parameter of this function. 2450 * The returned nid could be used ino as well as nid when inode is created. 2451 */ 2452 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) 2453 { 2454 struct f2fs_nm_info *nm_i = NM_I(sbi); 2455 struct free_nid *i = NULL; 2456 retry: 2457 if (time_to_inject(sbi, FAULT_ALLOC_NID)) { 2458 f2fs_show_injection_info(sbi, FAULT_ALLOC_NID); 2459 return false; 2460 } 2461 2462 spin_lock(&nm_i->nid_list_lock); 2463 2464 if (unlikely(nm_i->available_nids == 0)) { 2465 spin_unlock(&nm_i->nid_list_lock); 2466 return false; 2467 } 2468 2469 /* We should not use stale free nids created by f2fs_build_free_nids */ 2470 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) { 2471 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); 2472 i = list_first_entry(&nm_i->free_nid_list, 2473 struct free_nid, list); 2474 *nid = i->nid; 2475 2476 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID); 2477 nm_i->available_nids--; 2478 2479 update_free_nid_bitmap(sbi, *nid, false, false); 2480 2481 spin_unlock(&nm_i->nid_list_lock); 2482 return true; 2483 } 2484 spin_unlock(&nm_i->nid_list_lock); 2485 2486 /* Let's scan nat pages and its caches to get free nids */ 2487 if (!f2fs_build_free_nids(sbi, true, false)) 2488 goto retry; 2489 return false; 2490 } 2491 2492 /* 2493 * f2fs_alloc_nid() should be called prior to this function. 2494 */ 2495 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) 2496 { 2497 struct f2fs_nm_info *nm_i = NM_I(sbi); 2498 struct free_nid *i; 2499 2500 spin_lock(&nm_i->nid_list_lock); 2501 i = __lookup_free_nid_list(nm_i, nid); 2502 f2fs_bug_on(sbi, !i); 2503 __remove_free_nid(sbi, i, PREALLOC_NID); 2504 spin_unlock(&nm_i->nid_list_lock); 2505 2506 kmem_cache_free(free_nid_slab, i); 2507 } 2508 2509 /* 2510 * f2fs_alloc_nid() should be called prior to this function. 2511 */ 2512 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) 2513 { 2514 struct f2fs_nm_info *nm_i = NM_I(sbi); 2515 struct free_nid *i; 2516 bool need_free = false; 2517 2518 if (!nid) 2519 return; 2520 2521 spin_lock(&nm_i->nid_list_lock); 2522 i = __lookup_free_nid_list(nm_i, nid); 2523 f2fs_bug_on(sbi, !i); 2524 2525 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) { 2526 __remove_free_nid(sbi, i, PREALLOC_NID); 2527 need_free = true; 2528 } else { 2529 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID); 2530 } 2531 2532 nm_i->available_nids++; 2533 2534 update_free_nid_bitmap(sbi, nid, true, false); 2535 2536 spin_unlock(&nm_i->nid_list_lock); 2537 2538 if (need_free) 2539 kmem_cache_free(free_nid_slab, i); 2540 } 2541 2542 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) 2543 { 2544 struct f2fs_nm_info *nm_i = NM_I(sbi); 2545 int nr = nr_shrink; 2546 2547 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) 2548 return 0; 2549 2550 if (!mutex_trylock(&nm_i->build_lock)) 2551 return 0; 2552 2553 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) { 2554 struct free_nid *i, *next; 2555 unsigned int batch = SHRINK_NID_BATCH_SIZE; 2556 2557 spin_lock(&nm_i->nid_list_lock); 2558 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { 2559 if (!nr_shrink || !batch || 2560 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) 2561 break; 2562 __remove_free_nid(sbi, i, FREE_NID); 2563 kmem_cache_free(free_nid_slab, i); 2564 nr_shrink--; 2565 batch--; 2566 } 2567 spin_unlock(&nm_i->nid_list_lock); 2568 } 2569 2570 mutex_unlock(&nm_i->build_lock); 2571 2572 return nr - nr_shrink; 2573 } 2574 2575 void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) 2576 { 2577 void *src_addr, *dst_addr; 2578 size_t inline_size; 2579 struct page *ipage; 2580 struct f2fs_inode *ri; 2581 2582 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino); 2583 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); 2584 2585 ri = F2FS_INODE(page); 2586 if (ri->i_inline & F2FS_INLINE_XATTR) { 2587 set_inode_flag(inode, FI_INLINE_XATTR); 2588 } else { 2589 clear_inode_flag(inode, FI_INLINE_XATTR); 2590 goto update_inode; 2591 } 2592 2593 dst_addr = inline_xattr_addr(inode, ipage); 2594 src_addr = inline_xattr_addr(inode, page); 2595 inline_size = inline_xattr_size(inode); 2596 2597 f2fs_wait_on_page_writeback(ipage, NODE, true, true); 2598 memcpy(dst_addr, src_addr, inline_size); 2599 update_inode: 2600 f2fs_update_inode(inode, ipage); 2601 f2fs_put_page(ipage, 1); 2602 } 2603 2604 int f2fs_recover_xattr_data(struct inode *inode, struct page *page) 2605 { 2606 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2607 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; 2608 nid_t new_xnid; 2609 struct dnode_of_data dn; 2610 struct node_info ni; 2611 struct page *xpage; 2612 int err; 2613 2614 if (!prev_xnid) 2615 goto recover_xnid; 2616 2617 /* 1: invalidate the previous xattr nid */ 2618 err = f2fs_get_node_info(sbi, prev_xnid, &ni); 2619 if (err) 2620 return err; 2621 2622 f2fs_invalidate_blocks(sbi, ni.blk_addr); 2623 dec_valid_node_count(sbi, inode, false); 2624 set_node_addr(sbi, &ni, NULL_ADDR, false); 2625 2626 recover_xnid: 2627 /* 2: update xattr nid in inode */ 2628 if (!f2fs_alloc_nid(sbi, &new_xnid)) 2629 return -ENOSPC; 2630 2631 set_new_dnode(&dn, inode, NULL, NULL, new_xnid); 2632 xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET); 2633 if (IS_ERR(xpage)) { 2634 f2fs_alloc_nid_failed(sbi, new_xnid); 2635 return PTR_ERR(xpage); 2636 } 2637 2638 f2fs_alloc_nid_done(sbi, new_xnid); 2639 f2fs_update_inode_page(inode); 2640 2641 /* 3: update and set xattr node page dirty */ 2642 memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE); 2643 2644 set_page_dirty(xpage); 2645 f2fs_put_page(xpage, 1); 2646 2647 return 0; 2648 } 2649 2650 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) 2651 { 2652 struct f2fs_inode *src, *dst; 2653 nid_t ino = ino_of_node(page); 2654 struct node_info old_ni, new_ni; 2655 struct page *ipage; 2656 int err; 2657 2658 err = f2fs_get_node_info(sbi, ino, &old_ni); 2659 if (err) 2660 return err; 2661 2662 if (unlikely(old_ni.blk_addr != NULL_ADDR)) 2663 return -EINVAL; 2664 retry: 2665 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false); 2666 if (!ipage) { 2667 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); 2668 goto retry; 2669 } 2670 2671 /* Should not use this inode from free nid list */ 2672 remove_free_nid(sbi, ino); 2673 2674 if (!PageUptodate(ipage)) 2675 SetPageUptodate(ipage); 2676 fill_node_footer(ipage, ino, ino, 0, true); 2677 set_cold_node(ipage, false); 2678 2679 src = F2FS_INODE(page); 2680 dst = F2FS_INODE(ipage); 2681 2682 memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src); 2683 dst->i_size = 0; 2684 dst->i_blocks = cpu_to_le64(1); 2685 dst->i_links = cpu_to_le32(1); 2686 dst->i_xattr_nid = 0; 2687 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR); 2688 if (dst->i_inline & F2FS_EXTRA_ATTR) { 2689 dst->i_extra_isize = src->i_extra_isize; 2690 2691 if (f2fs_sb_has_flexible_inline_xattr(sbi) && 2692 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), 2693 i_inline_xattr_size)) 2694 dst->i_inline_xattr_size = src->i_inline_xattr_size; 2695 2696 if (f2fs_sb_has_project_quota(sbi) && 2697 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), 2698 i_projid)) 2699 dst->i_projid = src->i_projid; 2700 2701 if (f2fs_sb_has_inode_crtime(sbi) && 2702 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), 2703 i_crtime_nsec)) { 2704 dst->i_crtime = src->i_crtime; 2705 dst->i_crtime_nsec = src->i_crtime_nsec; 2706 } 2707 } 2708 2709 new_ni = old_ni; 2710 new_ni.ino = ino; 2711 2712 if (unlikely(inc_valid_node_count(sbi, NULL, true))) 2713 WARN_ON(1); 2714 set_node_addr(sbi, &new_ni, NEW_ADDR, false); 2715 inc_valid_inode_count(sbi); 2716 set_page_dirty(ipage); 2717 f2fs_put_page(ipage, 1); 2718 return 0; 2719 } 2720 2721 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 2722 unsigned int segno, struct f2fs_summary_block *sum) 2723 { 2724 struct f2fs_node *rn; 2725 struct f2fs_summary *sum_entry; 2726 block_t addr; 2727 int i, idx, last_offset, nrpages; 2728 2729 /* scan the node segment */ 2730 last_offset = sbi->blocks_per_seg; 2731 addr = START_BLOCK(sbi, segno); 2732 sum_entry = &sum->entries[0]; 2733 2734 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) { 2735 nrpages = min(last_offset - i, BIO_MAX_PAGES); 2736 2737 /* readahead node pages */ 2738 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true); 2739 2740 for (idx = addr; idx < addr + nrpages; idx++) { 2741 struct page *page = f2fs_get_tmp_page(sbi, idx); 2742 2743 if (IS_ERR(page)) 2744 return PTR_ERR(page); 2745 2746 rn = F2FS_NODE(page); 2747 sum_entry->nid = rn->footer.nid; 2748 sum_entry->version = 0; 2749 sum_entry->ofs_in_node = 0; 2750 sum_entry++; 2751 f2fs_put_page(page, 1); 2752 } 2753 2754 invalidate_mapping_pages(META_MAPPING(sbi), addr, 2755 addr + nrpages); 2756 } 2757 return 0; 2758 } 2759 2760 static void remove_nats_in_journal(struct f2fs_sb_info *sbi) 2761 { 2762 struct f2fs_nm_info *nm_i = NM_I(sbi); 2763 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2764 struct f2fs_journal *journal = curseg->journal; 2765 int i; 2766 2767 down_write(&curseg->journal_rwsem); 2768 for (i = 0; i < nats_in_cursum(journal); i++) { 2769 struct nat_entry *ne; 2770 struct f2fs_nat_entry raw_ne; 2771 nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); 2772 2773 raw_ne = nat_in_journal(journal, i); 2774 2775 ne = __lookup_nat_cache(nm_i, nid); 2776 if (!ne) { 2777 ne = __alloc_nat_entry(nid, true); 2778 __init_nat_entry(nm_i, ne, &raw_ne, true); 2779 } 2780 2781 /* 2782 * if a free nat in journal has not been used after last 2783 * checkpoint, we should remove it from available nids, 2784 * since later we will add it again. 2785 */ 2786 if (!get_nat_flag(ne, IS_DIRTY) && 2787 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) { 2788 spin_lock(&nm_i->nid_list_lock); 2789 nm_i->available_nids--; 2790 spin_unlock(&nm_i->nid_list_lock); 2791 } 2792 2793 __set_nat_cache_dirty(nm_i, ne); 2794 } 2795 update_nats_in_cursum(journal, -i); 2796 up_write(&curseg->journal_rwsem); 2797 } 2798 2799 static void __adjust_nat_entry_set(struct nat_entry_set *nes, 2800 struct list_head *head, int max) 2801 { 2802 struct nat_entry_set *cur; 2803 2804 if (nes->entry_cnt >= max) 2805 goto add_out; 2806 2807 list_for_each_entry(cur, head, set_list) { 2808 if (cur->entry_cnt >= nes->entry_cnt) { 2809 list_add(&nes->set_list, cur->set_list.prev); 2810 return; 2811 } 2812 } 2813 add_out: 2814 list_add_tail(&nes->set_list, head); 2815 } 2816 2817 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, 2818 struct page *page) 2819 { 2820 struct f2fs_nm_info *nm_i = NM_I(sbi); 2821 unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; 2822 struct f2fs_nat_block *nat_blk = page_address(page); 2823 int valid = 0; 2824 int i = 0; 2825 2826 if (!enabled_nat_bits(sbi, NULL)) 2827 return; 2828 2829 if (nat_index == 0) { 2830 valid = 1; 2831 i = 1; 2832 } 2833 for (; i < NAT_ENTRY_PER_BLOCK; i++) { 2834 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR) 2835 valid++; 2836 } 2837 if (valid == 0) { 2838 __set_bit_le(nat_index, nm_i->empty_nat_bits); 2839 __clear_bit_le(nat_index, nm_i->full_nat_bits); 2840 return; 2841 } 2842 2843 __clear_bit_le(nat_index, nm_i->empty_nat_bits); 2844 if (valid == NAT_ENTRY_PER_BLOCK) 2845 __set_bit_le(nat_index, nm_i->full_nat_bits); 2846 else 2847 __clear_bit_le(nat_index, nm_i->full_nat_bits); 2848 } 2849 2850 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, 2851 struct nat_entry_set *set, struct cp_control *cpc) 2852 { 2853 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2854 struct f2fs_journal *journal = curseg->journal; 2855 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK; 2856 bool to_journal = true; 2857 struct f2fs_nat_block *nat_blk; 2858 struct nat_entry *ne, *cur; 2859 struct page *page = NULL; 2860 2861 /* 2862 * there are two steps to flush nat entries: 2863 * #1, flush nat entries to journal in current hot data summary block. 2864 * #2, flush nat entries to nat page. 2865 */ 2866 if (enabled_nat_bits(sbi, cpc) || 2867 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) 2868 to_journal = false; 2869 2870 if (to_journal) { 2871 down_write(&curseg->journal_rwsem); 2872 } else { 2873 page = get_next_nat_page(sbi, start_nid); 2874 if (IS_ERR(page)) 2875 return PTR_ERR(page); 2876 2877 nat_blk = page_address(page); 2878 f2fs_bug_on(sbi, !nat_blk); 2879 } 2880 2881 /* flush dirty nats in nat entry set */ 2882 list_for_each_entry_safe(ne, cur, &set->entry_list, list) { 2883 struct f2fs_nat_entry *raw_ne; 2884 nid_t nid = nat_get_nid(ne); 2885 int offset; 2886 2887 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR); 2888 2889 if (to_journal) { 2890 offset = f2fs_lookup_journal_in_cursum(journal, 2891 NAT_JOURNAL, nid, 1); 2892 f2fs_bug_on(sbi, offset < 0); 2893 raw_ne = &nat_in_journal(journal, offset); 2894 nid_in_journal(journal, offset) = cpu_to_le32(nid); 2895 } else { 2896 raw_ne = &nat_blk->entries[nid - start_nid]; 2897 } 2898 raw_nat_from_node_info(raw_ne, &ne->ni); 2899 nat_reset_flag(ne); 2900 __clear_nat_cache_dirty(NM_I(sbi), set, ne); 2901 if (nat_get_blkaddr(ne) == NULL_ADDR) { 2902 add_free_nid(sbi, nid, false, true); 2903 } else { 2904 spin_lock(&NM_I(sbi)->nid_list_lock); 2905 update_free_nid_bitmap(sbi, nid, false, false); 2906 spin_unlock(&NM_I(sbi)->nid_list_lock); 2907 } 2908 } 2909 2910 if (to_journal) { 2911 up_write(&curseg->journal_rwsem); 2912 } else { 2913 __update_nat_bits(sbi, start_nid, page); 2914 f2fs_put_page(page, 1); 2915 } 2916 2917 /* Allow dirty nats by node block allocation in write_begin */ 2918 if (!set->entry_cnt) { 2919 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); 2920 kmem_cache_free(nat_entry_set_slab, set); 2921 } 2922 return 0; 2923 } 2924 2925 /* 2926 * This function is called during the checkpointing process. 2927 */ 2928 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) 2929 { 2930 struct f2fs_nm_info *nm_i = NM_I(sbi); 2931 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2932 struct f2fs_journal *journal = curseg->journal; 2933 struct nat_entry_set *setvec[SETVEC_SIZE]; 2934 struct nat_entry_set *set, *tmp; 2935 unsigned int found; 2936 nid_t set_idx = 0; 2937 LIST_HEAD(sets); 2938 int err = 0; 2939 2940 /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */ 2941 if (enabled_nat_bits(sbi, cpc)) { 2942 down_write(&nm_i->nat_tree_lock); 2943 remove_nats_in_journal(sbi); 2944 up_write(&nm_i->nat_tree_lock); 2945 } 2946 2947 if (!nm_i->dirty_nat_cnt) 2948 return 0; 2949 2950 down_write(&nm_i->nat_tree_lock); 2951 2952 /* 2953 * if there are no enough space in journal to store dirty nat 2954 * entries, remove all entries from journal and merge them 2955 * into nat entry set. 2956 */ 2957 if (enabled_nat_bits(sbi, cpc) || 2958 !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) 2959 remove_nats_in_journal(sbi); 2960 2961 while ((found = __gang_lookup_nat_set(nm_i, 2962 set_idx, SETVEC_SIZE, setvec))) { 2963 unsigned idx; 2964 set_idx = setvec[found - 1]->set + 1; 2965 for (idx = 0; idx < found; idx++) 2966 __adjust_nat_entry_set(setvec[idx], &sets, 2967 MAX_NAT_JENTRIES(journal)); 2968 } 2969 2970 /* flush dirty nats in nat entry set */ 2971 list_for_each_entry_safe(set, tmp, &sets, set_list) { 2972 err = __flush_nat_entry_set(sbi, set, cpc); 2973 if (err) 2974 break; 2975 } 2976 2977 up_write(&nm_i->nat_tree_lock); 2978 /* Allow dirty nats by node block allocation in write_begin */ 2979 2980 return err; 2981 } 2982 2983 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) 2984 { 2985 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2986 struct f2fs_nm_info *nm_i = NM_I(sbi); 2987 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; 2988 unsigned int i; 2989 __u64 cp_ver = cur_cp_version(ckpt); 2990 block_t nat_bits_addr; 2991 2992 if (!enabled_nat_bits(sbi, NULL)) 2993 return 0; 2994 2995 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); 2996 nm_i->nat_bits = f2fs_kvzalloc(sbi, 2997 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); 2998 if (!nm_i->nat_bits) 2999 return -ENOMEM; 3000 3001 nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg - 3002 nm_i->nat_bits_blocks; 3003 for (i = 0; i < nm_i->nat_bits_blocks; i++) { 3004 struct page *page; 3005 3006 page = f2fs_get_meta_page(sbi, nat_bits_addr++); 3007 if (IS_ERR(page)) 3008 return PTR_ERR(page); 3009 3010 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), 3011 page_address(page), F2FS_BLKSIZE); 3012 f2fs_put_page(page, 1); 3013 } 3014 3015 cp_ver |= (cur_cp_crc(ckpt) << 32); 3016 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { 3017 disable_nat_bits(sbi, true); 3018 return 0; 3019 } 3020 3021 nm_i->full_nat_bits = nm_i->nat_bits + 8; 3022 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; 3023 3024 f2fs_notice(sbi, "Found nat_bits in checkpoint"); 3025 return 0; 3026 } 3027 3028 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) 3029 { 3030 struct f2fs_nm_info *nm_i = NM_I(sbi); 3031 unsigned int i = 0; 3032 nid_t nid, last_nid; 3033 3034 if (!enabled_nat_bits(sbi, NULL)) 3035 return; 3036 3037 for (i = 0; i < nm_i->nat_blocks; i++) { 3038 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); 3039 if (i >= nm_i->nat_blocks) 3040 break; 3041 3042 __set_bit_le(i, nm_i->nat_block_bitmap); 3043 3044 nid = i * NAT_ENTRY_PER_BLOCK; 3045 last_nid = nid + NAT_ENTRY_PER_BLOCK; 3046 3047 spin_lock(&NM_I(sbi)->nid_list_lock); 3048 for (; nid < last_nid; nid++) 3049 update_free_nid_bitmap(sbi, nid, true, true); 3050 spin_unlock(&NM_I(sbi)->nid_list_lock); 3051 } 3052 3053 for (i = 0; i < nm_i->nat_blocks; i++) { 3054 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); 3055 if (i >= nm_i->nat_blocks) 3056 break; 3057 3058 __set_bit_le(i, nm_i->nat_block_bitmap); 3059 } 3060 } 3061 3062 static int init_node_manager(struct f2fs_sb_info *sbi) 3063 { 3064 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); 3065 struct f2fs_nm_info *nm_i = NM_I(sbi); 3066 unsigned char *version_bitmap; 3067 unsigned int nat_segs; 3068 int err; 3069 3070 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); 3071 3072 /* segment_count_nat includes pair segment so divide to 2. */ 3073 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; 3074 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); 3075 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; 3076 3077 /* not used nids: 0, node, meta, (and root counted as valid node) */ 3078 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - 3079 F2FS_RESERVED_NODE_NUM; 3080 nm_i->nid_cnt[FREE_NID] = 0; 3081 nm_i->nid_cnt[PREALLOC_NID] = 0; 3082 nm_i->nat_cnt = 0; 3083 nm_i->ram_thresh = DEF_RAM_THRESHOLD; 3084 nm_i->ra_nid_pages = DEF_RA_NID_PAGES; 3085 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; 3086 3087 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); 3088 INIT_LIST_HEAD(&nm_i->free_nid_list); 3089 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); 3090 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); 3091 INIT_LIST_HEAD(&nm_i->nat_entries); 3092 spin_lock_init(&nm_i->nat_list_lock); 3093 3094 mutex_init(&nm_i->build_lock); 3095 spin_lock_init(&nm_i->nid_list_lock); 3096 init_rwsem(&nm_i->nat_tree_lock); 3097 3098 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); 3099 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); 3100 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); 3101 if (!version_bitmap) 3102 return -EFAULT; 3103 3104 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, 3105 GFP_KERNEL); 3106 if (!nm_i->nat_bitmap) 3107 return -ENOMEM; 3108 3109 err = __get_nat_bitmaps(sbi); 3110 if (err) 3111 return err; 3112 3113 #ifdef CONFIG_F2FS_CHECK_FS 3114 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size, 3115 GFP_KERNEL); 3116 if (!nm_i->nat_bitmap_mir) 3117 return -ENOMEM; 3118 #endif 3119 3120 return 0; 3121 } 3122 3123 static int init_free_nid_cache(struct f2fs_sb_info *sbi) 3124 { 3125 struct f2fs_nm_info *nm_i = NM_I(sbi); 3126 int i; 3127 3128 nm_i->free_nid_bitmap = 3129 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), 3130 nm_i->nat_blocks), 3131 GFP_KERNEL); 3132 if (!nm_i->free_nid_bitmap) 3133 return -ENOMEM; 3134 3135 for (i = 0; i < nm_i->nat_blocks; i++) { 3136 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi, 3137 f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL); 3138 if (!nm_i->free_nid_bitmap[i]) 3139 return -ENOMEM; 3140 } 3141 3142 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8, 3143 GFP_KERNEL); 3144 if (!nm_i->nat_block_bitmap) 3145 return -ENOMEM; 3146 3147 nm_i->free_nid_count = 3148 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short), 3149 nm_i->nat_blocks), 3150 GFP_KERNEL); 3151 if (!nm_i->free_nid_count) 3152 return -ENOMEM; 3153 return 0; 3154 } 3155 3156 int f2fs_build_node_manager(struct f2fs_sb_info *sbi) 3157 { 3158 int err; 3159 3160 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info), 3161 GFP_KERNEL); 3162 if (!sbi->nm_info) 3163 return -ENOMEM; 3164 3165 err = init_node_manager(sbi); 3166 if (err) 3167 return err; 3168 3169 err = init_free_nid_cache(sbi); 3170 if (err) 3171 return err; 3172 3173 /* load free nid status from nat_bits table */ 3174 load_free_nid_bitmap(sbi); 3175 3176 return f2fs_build_free_nids(sbi, true, true); 3177 } 3178 3179 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) 3180 { 3181 struct f2fs_nm_info *nm_i = NM_I(sbi); 3182 struct free_nid *i, *next_i; 3183 struct nat_entry *natvec[NATVEC_SIZE]; 3184 struct nat_entry_set *setvec[SETVEC_SIZE]; 3185 nid_t nid = 0; 3186 unsigned int found; 3187 3188 if (!nm_i) 3189 return; 3190 3191 /* destroy free nid list */ 3192 spin_lock(&nm_i->nid_list_lock); 3193 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { 3194 __remove_free_nid(sbi, i, FREE_NID); 3195 spin_unlock(&nm_i->nid_list_lock); 3196 kmem_cache_free(free_nid_slab, i); 3197 spin_lock(&nm_i->nid_list_lock); 3198 } 3199 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); 3200 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); 3201 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); 3202 spin_unlock(&nm_i->nid_list_lock); 3203 3204 /* destroy nat cache */ 3205 down_write(&nm_i->nat_tree_lock); 3206 while ((found = __gang_lookup_nat_cache(nm_i, 3207 nid, NATVEC_SIZE, natvec))) { 3208 unsigned idx; 3209 3210 nid = nat_get_nid(natvec[found - 1]) + 1; 3211 for (idx = 0; idx < found; idx++) { 3212 spin_lock(&nm_i->nat_list_lock); 3213 list_del(&natvec[idx]->list); 3214 spin_unlock(&nm_i->nat_list_lock); 3215 3216 __del_from_nat_cache(nm_i, natvec[idx]); 3217 } 3218 } 3219 f2fs_bug_on(sbi, nm_i->nat_cnt); 3220 3221 /* destroy nat set cache */ 3222 nid = 0; 3223 while ((found = __gang_lookup_nat_set(nm_i, 3224 nid, SETVEC_SIZE, setvec))) { 3225 unsigned idx; 3226 3227 nid = setvec[found - 1]->set + 1; 3228 for (idx = 0; idx < found; idx++) { 3229 /* entry_cnt is not zero, when cp_error was occurred */ 3230 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); 3231 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); 3232 kmem_cache_free(nat_entry_set_slab, setvec[idx]); 3233 } 3234 } 3235 up_write(&nm_i->nat_tree_lock); 3236 3237 kvfree(nm_i->nat_block_bitmap); 3238 if (nm_i->free_nid_bitmap) { 3239 int i; 3240 3241 for (i = 0; i < nm_i->nat_blocks; i++) 3242 kvfree(nm_i->free_nid_bitmap[i]); 3243 kvfree(nm_i->free_nid_bitmap); 3244 } 3245 kvfree(nm_i->free_nid_count); 3246 3247 kvfree(nm_i->nat_bitmap); 3248 kvfree(nm_i->nat_bits); 3249 #ifdef CONFIG_F2FS_CHECK_FS 3250 kvfree(nm_i->nat_bitmap_mir); 3251 #endif 3252 sbi->nm_info = NULL; 3253 kvfree(nm_i); 3254 } 3255 3256 int __init f2fs_create_node_manager_caches(void) 3257 { 3258 nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry", 3259 sizeof(struct nat_entry)); 3260 if (!nat_entry_slab) 3261 goto fail; 3262 3263 free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid", 3264 sizeof(struct free_nid)); 3265 if (!free_nid_slab) 3266 goto destroy_nat_entry; 3267 3268 nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set", 3269 sizeof(struct nat_entry_set)); 3270 if (!nat_entry_set_slab) 3271 goto destroy_free_nid; 3272 3273 fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry", 3274 sizeof(struct fsync_node_entry)); 3275 if (!fsync_node_entry_slab) 3276 goto destroy_nat_entry_set; 3277 return 0; 3278 3279 destroy_nat_entry_set: 3280 kmem_cache_destroy(nat_entry_set_slab); 3281 destroy_free_nid: 3282 kmem_cache_destroy(free_nid_slab); 3283 destroy_nat_entry: 3284 kmem_cache_destroy(nat_entry_slab); 3285 fail: 3286 return -ENOMEM; 3287 } 3288 3289 void f2fs_destroy_node_manager_caches(void) 3290 { 3291 kmem_cache_destroy(fsync_node_entry_slab); 3292 kmem_cache_destroy(nat_entry_set_slab); 3293 kmem_cache_destroy(free_nid_slab); 3294 kmem_cache_destroy(nat_entry_slab); 3295 } 3296