// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within the node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 25%, and 25% memory to each component,
	 * respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
		enum extent_type etype = type == READ_EXTENT_CACHE ?
						EX_READ : EX_BLOCK_AGE;
		struct extent_tree_info *eti = &sbi->extent_tree[etype];

		mem_size = (atomic_read(&eti->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&eti->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching compressed
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
				free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
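
/*
 * Worked example for the thresholds above, with illustrative numbers:
 * assuming the default ram_thresh of 1 and 1GiB of low memory with 4KiB
 * pages (avail_ram = 262144 pages), the overall budget is
 * 262144 * 1 / 100 = 2621 pages (~10MiB), so the FREE_NIDS and
 * NAT_ENTRIES caches (budget >> 2) may each grow to roughly 655 pages
 * (~2.5MiB) before f2fs_available_free_memory() reports pressure.
 */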

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
						GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the following conditions:
	 * 1. update NEW_ADDR to a valid block address;
	 * 2. update an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
					GFP_NOFS, true, NULL);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}
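
/*
 * Note: fn->seq_id is assigned while fsync_node_lock is held above, so
 * entries on fsync_node_list are strictly ordered by seq_id.
 * f2fs_wait_on_node_pages_writeback() relies on this ordering to stop
 * scanning the list once it meets an entry whose seq_id exceeds the
 * target.
 */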

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}
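
/*
 * A cached nat entry normally walks through the following block address
 * transitions, matching the sanity checks in set_node_addr() below:
 *
 *   NULL_ADDR -> NEW_ADDR       node preallocated (f2fs_new_node_page)
 *   NEW_ADDR  -> valid blkaddr  node block written out
 *   valid     -> valid          node block relocated (e.g. by GC)
 *   valid     -> NULL_ADDR      node truncated; the version is bumped
 */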

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment the version no as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
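
/*
 * f2fs_get_node_info() below resolves a node_info in three steps,
 * cheapest first: 1) the in-memory nat cache, 2) the NAT journal kept
 * in the hot-data curseg summary, and 3) the on-disk NAT block.  A hit
 * in an earlier step skips the later ones, and a result read from disk
 * is cached via cache_nat_entry() for subsequent lookups.
 */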

int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab
	 * journal_rwsem first.  This rwsem is on the critical path of
	 * checkpoint, which also takes the above nat_tree_lock.
	 * Therefore, retry if we fail to grab it here, without bothering
	 * checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from the nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
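
/*
 * Example for f2fs_get_next_page_offset() above, using illustrative
 * default 4KB-block values (direct_index = 923, direct_blks =
 * NIDS_PER_BLOCK = 1018): a lookup that stopped at a missing direct
 * node below the first indirect node (dn->max_level = 2,
 * dn->cur_level = 2) with pgofs = 3000 computes
 * base = 923 + 2 * 1018 = 2959 and skipped_unit = 1018, so the scan
 * resumes at ((3000 - 2959) / 1018 + 1) * 1018 + 2959 = 3977, the first
 * offset served by the next direct node.
 */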

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
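
/*
 * Example with the same illustrative 4KB-block values as above
 * (direct_index = 923, direct_blks = dptrs_per_blk = 1018): file block
 * 2959 is the first block behind both direct node blocks, so
 * get_node_path() returns level 2 with
 * offset[] = { NODE_IND1_BLOCK, 0, 0 } and noffset[] = { 0, 3, 4 },
 * i.e. the third and fourth node blocks of this inode are the first
 * indirect node and its first direct node child.
 */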

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
		unsigned int ofs_in_node = dn->ofs_in_node;
		pgoff_t fofs = index;
		unsigned int c_len;
		block_t blkaddr;

		/* should align fofs and ofs_in_node to cluster_size */
		if (fofs % cluster_size) {
			fofs = round_down(fofs, cluster_size);
			ofs_in_node = round_down(ofs_in_node, cluster_size);
		}

		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
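
/*
 * Typical lookup pattern for the function above (a sketch only, with
 * error handling elided):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 *
 * ALLOC_NODE additionally requires f2fs_lock_op()/f2fs_unlock_op()
 * around the call, as noted above.
 */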

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	if (ni.blk_addr != NEW_ADDR &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
		f2fs_err_ratelimited(sbi,
			"nat entry is corrupted, run fsck to fix it, ino:%u, "
			"nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		return -EFSCORRUPTED;
	}

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(sbi, dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
				dn->inode->i_ino, dn->nid, ino_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
		f2fs_put_page(page, 1);
		return -EFSCORRUPTED;
	}

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
	err = truncate_node(dn);
	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
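
/*
 * Return convention of the truncate helpers above: a positive return is
 * the number of node blocks released from the tree (truncate_dnode()
 * returns 1 for its single direct node; truncate_nodes() returns
 * NIDS_PER_BLOCK + 1 once an indirect node and all of its children are
 * gone), which f2fs_truncate_inode_blocks() below accumulates into nofs.
 */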

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		dec_valid_node_count(sbi, dn->inode, !ofs);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn_ratelimited(sbi,
			"f2fs_new_node_page: inconsistent nat entry, "
			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
			new_ni.ino, new_ni.nid, new_ni.blk_addr,
			new_ni.version, new_ni.flag);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;
fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act as follows depending on the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		goto out_put_err;
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (likely(nid == nid_of_node(page)))
		return page;

	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
		  nid, nid_of_node(page), ino_of_node(page),
		  ofs_of_node(page), cpver_of_node(page),
		  next_blkaddr_of_node(page));
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
	err = -EFSCORRUPTED;
out_err:
	ClearPageUptodate(page);
out_put_err:
	/* ENOENT comes from read_node_page which is not an error. */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, page->index, NODE);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct folio_batch fbatch;
	struct page *last_page = NULL;
	int nr_folios;

	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return last_page;
}

static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* keep node pages in remount-ro mode */
		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
			goto redirty_out;
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!f2fs_down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		f2fs_down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_folios;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				percpu_counter_inc(&sbi->rf_node_block_count);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}
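
/*
 * Sketch of the fsync path driving f2fs_fsync_node_pages() above and
 * f2fs_wait_on_node_pages_writeback() further below (cf.
 * f2fs_do_sync_file(); error handling elided):
 *
 *	unsigned int seq_id = 0;
 *
 *	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
 *	...
 *	ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
 *
 * seq_id identifies the last node block queued by this fsync, so the
 * wait covers only the writeback this caller actually issued.
 */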

static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}

static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}

void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct folio_batch fbatch;
	int nr_folios;

	folio_batch_init(&fbatch);

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (!IS_DNODE(page))
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_folios, done = 0;

	folio_batch_init(&fbatch);

next_step:
	index = 0;

	while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
				&index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
				&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			/* give priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && flush_dirty_inode(page))
				goto lock_node;
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);

		put_page(page);
	}

	return filemap_check_errors(NODE_MAPPING(sbi));
}

static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
2150 atomic_inc(&sbi->wb_sync_req[NODE]); 2151 else if (atomic_read(&sbi->wb_sync_req[NODE])) { 2152 /* to avoid potential deadlock */ 2153 if (current->plug) 2154 blk_finish_plug(current->plug); 2155 goto skip_write; 2156 } 2157 2158 trace_f2fs_writepages(mapping->host, wbc, NODE); 2159 2160 diff = nr_pages_to_write(sbi, NODE, wbc); 2161 blk_start_plug(&plug); 2162 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO); 2163 blk_finish_plug(&plug); 2164 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); 2165 2166 if (wbc->sync_mode == WB_SYNC_ALL) 2167 atomic_dec(&sbi->wb_sync_req[NODE]); 2168 return 0; 2169 2170 skip_write: 2171 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); 2172 trace_f2fs_writepages(mapping->host, wbc, NODE); 2173 return 0; 2174 } 2175 2176 static bool f2fs_dirty_node_folio(struct address_space *mapping, 2177 struct folio *folio) 2178 { 2179 trace_f2fs_set_page_dirty(&folio->page, NODE); 2180 2181 if (!folio_test_uptodate(folio)) 2182 folio_mark_uptodate(folio); 2183 #ifdef CONFIG_F2FS_CHECK_FS 2184 if (IS_INODE(&folio->page)) 2185 f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page); 2186 #endif 2187 if (filemap_dirty_folio(mapping, folio)) { 2188 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES); 2189 set_page_private_reference(&folio->page); 2190 return true; 2191 } 2192 return false; 2193 } 2194 2195 /* 2196 * Structure of the f2fs node operations 2197 */ 2198 const struct address_space_operations f2fs_node_aops = { 2199 .writepage = f2fs_write_node_page, 2200 .writepages = f2fs_write_node_pages, 2201 .dirty_folio = f2fs_dirty_node_folio, 2202 .invalidate_folio = f2fs_invalidate_folio, 2203 .release_folio = f2fs_release_folio, 2204 .migrate_folio = filemap_migrate_folio, 2205 }; 2206 2207 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, 2208 nid_t n) 2209 { 2210 return radix_tree_lookup(&nm_i->free_nid_root, n); 2211 } 2212 2213 static int __insert_free_nid(struct f2fs_sb_info *sbi, 2214 struct free_nid *i) 2215 { 2216 struct f2fs_nm_info *nm_i = NM_I(sbi); 2217 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); 2218 2219 if (err) 2220 return err; 2221 2222 nm_i->nid_cnt[FREE_NID]++; 2223 list_add_tail(&i->list, &nm_i->free_nid_list); 2224 return 0; 2225 } 2226 2227 static void __remove_free_nid(struct f2fs_sb_info *sbi, 2228 struct free_nid *i, enum nid_state state) 2229 { 2230 struct f2fs_nm_info *nm_i = NM_I(sbi); 2231 2232 f2fs_bug_on(sbi, state != i->state); 2233 nm_i->nid_cnt[state]--; 2234 if (state == FREE_NID) 2235 list_del(&i->list); 2236 radix_tree_delete(&nm_i->free_nid_root, i->nid); 2237 } 2238 2239 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, 2240 enum nid_state org_state, enum nid_state dst_state) 2241 { 2242 struct f2fs_nm_info *nm_i = NM_I(sbi); 2243 2244 f2fs_bug_on(sbi, org_state != i->state); 2245 i->state = dst_state; 2246 nm_i->nid_cnt[org_state]--; 2247 nm_i->nid_cnt[dst_state]++; 2248 2249 switch (dst_state) { 2250 case PREALLOC_NID: 2251 list_del(&i->list); 2252 break; 2253 case FREE_NID: 2254 list_add_tail(&i->list, &nm_i->free_nid_list); 2255 break; 2256 default: 2257 BUG_ON(1); 2258 } 2259 } 2260 2261 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi) 2262 { 2263 struct f2fs_nm_info *nm_i = NM_I(sbi); 2264 unsigned int i; 2265 bool ret = true; 2266 2267 f2fs_down_read(&nm_i->nat_tree_lock); 2268 for (i = 0; i < nm_i->nat_blocks; i++) { 2269 if (!test_bit_le(i, nm_i->nat_block_bitmap)) { 2270 ret = false; 2271 break; 2272 } 2273 } 2274 
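/* ret stays true only if every NAT block has been scanned into the free nid bitmap; a single unscanned block makes the result false */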
f2fs_up_read(&nm_i->nat_tree_lock); 2275 2276 return ret; 2277 } 2278 2279 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, 2280 bool set, bool build) 2281 { 2282 struct f2fs_nm_info *nm_i = NM_I(sbi); 2283 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); 2284 unsigned int nid_ofs = nid - START_NID(nid); 2285 2286 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) 2287 return; 2288 2289 if (set) { 2290 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) 2291 return; 2292 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 2293 nm_i->free_nid_count[nat_ofs]++; 2294 } else { 2295 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) 2296 return; 2297 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 2298 if (!build) 2299 nm_i->free_nid_count[nat_ofs]--; 2300 } 2301 } 2302 2303 /* return if the nid is recognized as free */ 2304 static bool add_free_nid(struct f2fs_sb_info *sbi, 2305 nid_t nid, bool build, bool update) 2306 { 2307 struct f2fs_nm_info *nm_i = NM_I(sbi); 2308 struct free_nid *i, *e; 2309 struct nat_entry *ne; 2310 int err = -EINVAL; 2311 bool ret = false; 2312 2313 /* 0 nid should not be used */ 2314 if (unlikely(nid == 0)) 2315 return false; 2316 2317 if (unlikely(f2fs_check_nid_range(sbi, nid))) 2318 return false; 2319 2320 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL); 2321 i->nid = nid; 2322 i->state = FREE_NID; 2323 2324 radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); 2325 2326 spin_lock(&nm_i->nid_list_lock); 2327 2328 if (build) { 2329 /* 2330 * Thread A Thread B 2331 * - f2fs_create 2332 * - f2fs_new_inode 2333 * - f2fs_alloc_nid 2334 * - __insert_nid_to_list(PREALLOC_NID) 2335 * - f2fs_balance_fs_bg 2336 * - f2fs_build_free_nids 2337 * - __f2fs_build_free_nids 2338 * - scan_nat_page 2339 * - add_free_nid 2340 * - __lookup_nat_cache 2341 * - f2fs_add_link 2342 * - f2fs_init_inode_metadata 2343 * - f2fs_new_inode_page 2344 * - f2fs_new_node_page 2345 * - set_node_addr 2346 * - f2fs_alloc_nid_done 2347 * - __remove_nid_from_list(PREALLOC_NID) 2348 * - __insert_nid_to_list(FREE_NID) 2349 */ 2350 ne = __lookup_nat_cache(nm_i, nid); 2351 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || 2352 nat_get_blkaddr(ne) != NULL_ADDR)) 2353 goto err_out; 2354 2355 e = __lookup_free_nid_list(nm_i, nid); 2356 if (e) { 2357 if (e->state == FREE_NID) 2358 ret = true; 2359 goto err_out; 2360 } 2361 } 2362 ret = true; 2363 err = __insert_free_nid(sbi, i); 2364 err_out: 2365 if (update) { 2366 update_free_nid_bitmap(sbi, nid, ret, build); 2367 if (!build) 2368 nm_i->available_nids++; 2369 } 2370 spin_unlock(&nm_i->nid_list_lock); 2371 radix_tree_preload_end(); 2372 2373 if (err) 2374 kmem_cache_free(free_nid_slab, i); 2375 return ret; 2376 } 2377 2378 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) 2379 { 2380 struct f2fs_nm_info *nm_i = NM_I(sbi); 2381 struct free_nid *i; 2382 bool need_free = false; 2383 2384 spin_lock(&nm_i->nid_list_lock); 2385 i = __lookup_free_nid_list(nm_i, nid); 2386 if (i && i->state == FREE_NID) { 2387 __remove_free_nid(sbi, i, FREE_NID); 2388 need_free = true; 2389 } 2390 spin_unlock(&nm_i->nid_list_lock); 2391 2392 if (need_free) 2393 kmem_cache_free(free_nid_slab, i); 2394 } 2395 2396 static int scan_nat_page(struct f2fs_sb_info *sbi, 2397 struct page *nat_page, nid_t start_nid) 2398 { 2399 struct f2fs_nm_info *nm_i = NM_I(sbi); 2400 struct f2fs_nat_block *nat_blk = page_address(nat_page); 2401 block_t blk_addr; 2402 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); 2403 int i; 2404 2405 
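/* mark the whole block as scanned before walking its entries: update_free_nid_bitmap() ignores nids whose NAT block is not yet set in nat_block_bitmap */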
__set_bit_le(nat_ofs, nm_i->nat_block_bitmap); 2406 2407 i = start_nid % NAT_ENTRY_PER_BLOCK; 2408 2409 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { 2410 if (unlikely(start_nid >= nm_i->max_nid)) 2411 break; 2412 2413 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); 2414 2415 if (blk_addr == NEW_ADDR) 2416 return -EFSCORRUPTED; 2417 2418 if (blk_addr == NULL_ADDR) { 2419 add_free_nid(sbi, start_nid, true, true); 2420 } else { 2421 spin_lock(&NM_I(sbi)->nid_list_lock); 2422 update_free_nid_bitmap(sbi, start_nid, false, true); 2423 spin_unlock(&NM_I(sbi)->nid_list_lock); 2424 } 2425 } 2426 2427 return 0; 2428 } 2429 2430 static void scan_curseg_cache(struct f2fs_sb_info *sbi) 2431 { 2432 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2433 struct f2fs_journal *journal = curseg->journal; 2434 int i; 2435 2436 down_read(&curseg->journal_rwsem); 2437 for (i = 0; i < nats_in_cursum(journal); i++) { 2438 block_t addr; 2439 nid_t nid; 2440 2441 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); 2442 nid = le32_to_cpu(nid_in_journal(journal, i)); 2443 if (addr == NULL_ADDR) 2444 add_free_nid(sbi, nid, true, false); 2445 else 2446 remove_free_nid(sbi, nid); 2447 } 2448 up_read(&curseg->journal_rwsem); 2449 } 2450 2451 static void scan_free_nid_bits(struct f2fs_sb_info *sbi) 2452 { 2453 struct f2fs_nm_info *nm_i = NM_I(sbi); 2454 unsigned int i, idx; 2455 nid_t nid; 2456 2457 f2fs_down_read(&nm_i->nat_tree_lock); 2458 2459 for (i = 0; i < nm_i->nat_blocks; i++) { 2460 if (!test_bit_le(i, nm_i->nat_block_bitmap)) 2461 continue; 2462 if (!nm_i->free_nid_count[i]) 2463 continue; 2464 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { 2465 idx = find_next_bit_le(nm_i->free_nid_bitmap[i], 2466 NAT_ENTRY_PER_BLOCK, idx); 2467 if (idx >= NAT_ENTRY_PER_BLOCK) 2468 break; 2469 2470 nid = i * NAT_ENTRY_PER_BLOCK + idx; 2471 add_free_nid(sbi, nid, true, false); 2472 2473 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS) 2474 goto out; 2475 } 2476 } 2477 out: 2478 scan_curseg_cache(sbi); 2479 2480 f2fs_up_read(&nm_i->nat_tree_lock); 2481 } 2482 2483 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, 2484 bool sync, bool mount) 2485 { 2486 struct f2fs_nm_info *nm_i = NM_I(sbi); 2487 int i = 0, ret; 2488 nid_t nid = nm_i->next_scan_nid; 2489 2490 if (unlikely(nid >= nm_i->max_nid)) 2491 nid = 0; 2492 2493 if (unlikely(nid % NAT_ENTRY_PER_BLOCK)) 2494 nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK; 2495 2496 /* Enough entries */ 2497 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) 2498 return 0; 2499 2500 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS)) 2501 return 0; 2502 2503 if (!mount) { 2504 /* try to find free nids in free_nid_bitmap */ 2505 scan_free_nid_bits(sbi); 2506 2507 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) 2508 return 0; 2509 } 2510 2511 /* readahead nat pages to be scanned */ 2512 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, 2513 META_NAT, true); 2514 2515 f2fs_down_read(&nm_i->nat_tree_lock); 2516 2517 while (1) { 2518 if (!test_bit_le(NAT_BLOCK_OFFSET(nid), 2519 nm_i->nat_block_bitmap)) { 2520 struct page *page = get_current_nat_page(sbi, nid); 2521 2522 if (IS_ERR(page)) { 2523 ret = PTR_ERR(page); 2524 } else { 2525 ret = scan_nat_page(sbi, page, nid); 2526 f2fs_put_page(page, 1); 2527 } 2528 2529 if (ret) { 2530 f2fs_up_read(&nm_i->nat_tree_lock); 2531 2532 if (ret == -EFSCORRUPTED) { 2533 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); 2534 set_sbi_flag(sbi, SBI_NEED_FSCK); 2535 f2fs_handle_error(sbi, 
2536 ERROR_INCONSISTENT_NAT); 2537 } 2538 2539 return ret; 2540 } 2541 } 2542 2543 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); 2544 if (unlikely(nid >= nm_i->max_nid)) 2545 nid = 0; 2546 2547 if (++i >= FREE_NID_PAGES) 2548 break; 2549 } 2550 2551 /* resume the next scan from the following nat pages to find more free nids */ 2552 nm_i->next_scan_nid = nid; 2553 2554 /* find free nids from current sum_pages */ 2555 scan_curseg_cache(sbi); 2556 2557 f2fs_up_read(&nm_i->nat_tree_lock); 2558 2559 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), 2560 nm_i->ra_nid_pages, META_NAT, false); 2561 2562 return 0; 2563 } 2564 2565 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) 2566 { 2567 int ret; 2568 2569 mutex_lock(&NM_I(sbi)->build_lock); 2570 ret = __f2fs_build_free_nids(sbi, sync, mount); 2571 mutex_unlock(&NM_I(sbi)->build_lock); 2572 2573 return ret; 2574 } 2575 2576 /* 2577 * If this function returns success, the caller can obtain a new nid 2578 * from the second parameter of this function. 2579 * The returned nid can be used as an ino as well as a nid when an inode is created. 2580 */ 2581 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) 2582 { 2583 struct f2fs_nm_info *nm_i = NM_I(sbi); 2584 struct free_nid *i = NULL; 2585 retry: 2586 if (time_to_inject(sbi, FAULT_ALLOC_NID)) 2587 return false; 2588 2589 spin_lock(&nm_i->nid_list_lock); 2590 2591 if (unlikely(nm_i->available_nids == 0)) { 2592 spin_unlock(&nm_i->nid_list_lock); 2593 return false; 2594 } 2595 2596 /* We should not use stale free nids created by f2fs_build_free_nids */ 2597 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) { 2598 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); 2599 i = list_first_entry(&nm_i->free_nid_list, 2600 struct free_nid, list); 2601 *nid = i->nid; 2602 2603 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID); 2604 nm_i->available_nids--; 2605 2606 update_free_nid_bitmap(sbi, *nid, false, false); 2607 2608 spin_unlock(&nm_i->nid_list_lock); 2609 return true; 2610 } 2611 spin_unlock(&nm_i->nid_list_lock); 2612 2613 /* Let's scan nat pages and their caches to get free nids */ 2614 if (!f2fs_build_free_nids(sbi, true, false)) 2615 goto retry; 2616 return false; 2617 } 2618 2619 /* 2620 * f2fs_alloc_nid() should be called prior to this function. 2621 */ 2622 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) 2623 { 2624 struct f2fs_nm_info *nm_i = NM_I(sbi); 2625 struct free_nid *i; 2626 2627 spin_lock(&nm_i->nid_list_lock); 2628 i = __lookup_free_nid_list(nm_i, nid); 2629 f2fs_bug_on(sbi, !i); 2630 __remove_free_nid(sbi, i, PREALLOC_NID); 2631 spin_unlock(&nm_i->nid_list_lock); 2632 2633 kmem_cache_free(free_nid_slab, i); 2634 } 2635 2636 /* 2637 * f2fs_alloc_nid() should be called prior to this function.
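 * On failure, the preallocated nid is returned to the FREE_NID list, or freed outright when free nids are already consuming too much memory.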
2638 */ 2639 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) 2640 { 2641 struct f2fs_nm_info *nm_i = NM_I(sbi); 2642 struct free_nid *i; 2643 bool need_free = false; 2644 2645 if (!nid) 2646 return; 2647 2648 spin_lock(&nm_i->nid_list_lock); 2649 i = __lookup_free_nid_list(nm_i, nid); 2650 f2fs_bug_on(sbi, !i); 2651 2652 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) { 2653 __remove_free_nid(sbi, i, PREALLOC_NID); 2654 need_free = true; 2655 } else { 2656 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID); 2657 } 2658 2659 nm_i->available_nids++; 2660 2661 update_free_nid_bitmap(sbi, nid, true, false); 2662 2663 spin_unlock(&nm_i->nid_list_lock); 2664 2665 if (need_free) 2666 kmem_cache_free(free_nid_slab, i); 2667 } 2668 2669 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) 2670 { 2671 struct f2fs_nm_info *nm_i = NM_I(sbi); 2672 int nr = nr_shrink; 2673 2674 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) 2675 return 0; 2676 2677 if (!mutex_trylock(&nm_i->build_lock)) 2678 return 0; 2679 2680 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) { 2681 struct free_nid *i, *next; 2682 unsigned int batch = SHRINK_NID_BATCH_SIZE; 2683 2684 spin_lock(&nm_i->nid_list_lock); 2685 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { 2686 if (!nr_shrink || !batch || 2687 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) 2688 break; 2689 __remove_free_nid(sbi, i, FREE_NID); 2690 kmem_cache_free(free_nid_slab, i); 2691 nr_shrink--; 2692 batch--; 2693 } 2694 spin_unlock(&nm_i->nid_list_lock); 2695 } 2696 2697 mutex_unlock(&nm_i->build_lock); 2698 2699 return nr - nr_shrink; 2700 } 2701 2702 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page) 2703 { 2704 void *src_addr, *dst_addr; 2705 size_t inline_size; 2706 struct page *ipage; 2707 struct f2fs_inode *ri; 2708 2709 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino); 2710 if (IS_ERR(ipage)) 2711 return PTR_ERR(ipage); 2712 2713 ri = F2FS_INODE(page); 2714 if (ri->i_inline & F2FS_INLINE_XATTR) { 2715 if (!f2fs_has_inline_xattr(inode)) { 2716 set_inode_flag(inode, FI_INLINE_XATTR); 2717 stat_inc_inline_xattr(inode); 2718 } 2719 } else { 2720 if (f2fs_has_inline_xattr(inode)) { 2721 stat_dec_inline_xattr(inode); 2722 clear_inode_flag(inode, FI_INLINE_XATTR); 2723 } 2724 goto update_inode; 2725 } 2726 2727 dst_addr = inline_xattr_addr(inode, ipage); 2728 src_addr = inline_xattr_addr(inode, page); 2729 inline_size = inline_xattr_size(inode); 2730 2731 f2fs_wait_on_page_writeback(ipage, NODE, true, true); 2732 memcpy(dst_addr, src_addr, inline_size); 2733 update_inode: 2734 f2fs_update_inode(inode, ipage); 2735 f2fs_put_page(ipage, 1); 2736 return 0; 2737 } 2738 2739 int f2fs_recover_xattr_data(struct inode *inode, struct page *page) 2740 { 2741 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2742 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; 2743 nid_t new_xnid; 2744 struct dnode_of_data dn; 2745 struct node_info ni; 2746 struct page *xpage; 2747 int err; 2748 2749 if (!prev_xnid) 2750 goto recover_xnid; 2751 2752 /* 1: invalidate the previous xattr nid */ 2753 err = f2fs_get_node_info(sbi, prev_xnid, &ni, false); 2754 if (err) 2755 return err; 2756 2757 f2fs_invalidate_blocks(sbi, ni.blk_addr); 2758 dec_valid_node_count(sbi, inode, false); 2759 set_node_addr(sbi, &ni, NULL_ADDR, false); 2760 2761 recover_xnid: 2762 /* 2: update xattr nid in inode */ 2763 if (!f2fs_alloc_nid(sbi, &new_xnid)) 2764 return -ENOSPC; 2765 2766 set_new_dnode(&dn, inode, NULL, NULL, new_xnid); 2767 xpage = 
f2fs_new_node_page(&dn, XATTR_NODE_OFFSET); 2768 if (IS_ERR(xpage)) { 2769 f2fs_alloc_nid_failed(sbi, new_xnid); 2770 return PTR_ERR(xpage); 2771 } 2772 2773 f2fs_alloc_nid_done(sbi, new_xnid); 2774 f2fs_update_inode_page(inode); 2775 2776 /* 3: update and set xattr node page dirty */ 2777 if (page) { 2778 memcpy(F2FS_NODE(xpage), F2FS_NODE(page), 2779 VALID_XATTR_BLOCK_SIZE); 2780 set_page_dirty(xpage); 2781 } 2782 f2fs_put_page(xpage, 1); 2783 2784 return 0; 2785 } 2786 2787 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) 2788 { 2789 struct f2fs_inode *src, *dst; 2790 nid_t ino = ino_of_node(page); 2791 struct node_info old_ni, new_ni; 2792 struct page *ipage; 2793 int err; 2794 2795 err = f2fs_get_node_info(sbi, ino, &old_ni, false); 2796 if (err) 2797 return err; 2798 2799 if (unlikely(old_ni.blk_addr != NULL_ADDR)) 2800 return -EINVAL; 2801 retry: 2802 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false); 2803 if (!ipage) { 2804 memalloc_retry_wait(GFP_NOFS); 2805 goto retry; 2806 } 2807 2808 /* Should not use this inode from free nid list */ 2809 remove_free_nid(sbi, ino); 2810 2811 if (!PageUptodate(ipage)) 2812 SetPageUptodate(ipage); 2813 fill_node_footer(ipage, ino, ino, 0, true); 2814 set_cold_node(ipage, false); 2815 2816 src = F2FS_INODE(page); 2817 dst = F2FS_INODE(ipage); 2818 2819 memcpy(dst, src, offsetof(struct f2fs_inode, i_ext)); 2820 dst->i_size = 0; 2821 dst->i_blocks = cpu_to_le64(1); 2822 dst->i_links = cpu_to_le32(1); 2823 dst->i_xattr_nid = 0; 2824 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR); 2825 if (dst->i_inline & F2FS_EXTRA_ATTR) { 2826 dst->i_extra_isize = src->i_extra_isize; 2827 2828 if (f2fs_sb_has_flexible_inline_xattr(sbi) && 2829 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), 2830 i_inline_xattr_size)) 2831 dst->i_inline_xattr_size = src->i_inline_xattr_size; 2832 2833 if (f2fs_sb_has_project_quota(sbi) && 2834 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), 2835 i_projid)) 2836 dst->i_projid = src->i_projid; 2837 2838 if (f2fs_sb_has_inode_crtime(sbi) && 2839 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), 2840 i_crtime_nsec)) { 2841 dst->i_crtime = src->i_crtime; 2842 dst->i_crtime_nsec = src->i_crtime_nsec; 2843 } 2844 } 2845 2846 new_ni = old_ni; 2847 new_ni.ino = ino; 2848 2849 if (unlikely(inc_valid_node_count(sbi, NULL, true))) 2850 WARN_ON(1); 2851 set_node_addr(sbi, &new_ni, NEW_ADDR, false); 2852 inc_valid_inode_count(sbi); 2853 set_page_dirty(ipage); 2854 f2fs_put_page(ipage, 1); 2855 return 0; 2856 } 2857 2858 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 2859 unsigned int segno, struct f2fs_summary_block *sum) 2860 { 2861 struct f2fs_node *rn; 2862 struct f2fs_summary *sum_entry; 2863 block_t addr; 2864 int i, idx, last_offset, nrpages; 2865 2866 /* scan the node segment */ 2867 last_offset = BLKS_PER_SEG(sbi); 2868 addr = START_BLOCK(sbi, segno); 2869 sum_entry = &sum->entries[0]; 2870 2871 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) { 2872 nrpages = bio_max_segs(last_offset - i); 2873 2874 /* readahead node pages */ 2875 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true); 2876 2877 for (idx = addr; idx < addr + nrpages; idx++) { 2878 struct page *page = f2fs_get_tmp_page(sbi, idx); 2879 2880 if (IS_ERR(page)) 2881 return PTR_ERR(page); 2882 2883 rn = F2FS_NODE(page); 2884 sum_entry->nid = rn->footer.nid; 2885 sum_entry->version = 0; 2886 sum_entry->ofs_in_node = 0; 2887 sum_entry++; 2888 f2fs_put_page(page, 1); 2889 } 2890 
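/* the node pages above were read only to copy out their summary entries, so drop them from the meta cache right away */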
2891 invalidate_mapping_pages(META_MAPPING(sbi), addr, 2892 addr + nrpages); 2893 } 2894 return 0; 2895 } 2896 2897 static void remove_nats_in_journal(struct f2fs_sb_info *sbi) 2898 { 2899 struct f2fs_nm_info *nm_i = NM_I(sbi); 2900 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 2901 struct f2fs_journal *journal = curseg->journal; 2902 int i; 2903 2904 down_write(&curseg->journal_rwsem); 2905 for (i = 0; i < nats_in_cursum(journal); i++) { 2906 struct nat_entry *ne; 2907 struct f2fs_nat_entry raw_ne; 2908 nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); 2909 2910 if (f2fs_check_nid_range(sbi, nid)) 2911 continue; 2912 2913 raw_ne = nat_in_journal(journal, i); 2914 2915 ne = __lookup_nat_cache(nm_i, nid); 2916 if (!ne) { 2917 ne = __alloc_nat_entry(sbi, nid, true); 2918 __init_nat_entry(nm_i, ne, &raw_ne, true); 2919 } 2920 2921 /* 2922 * if a free nat in journal has not been used after last 2923 * checkpoint, we should remove it from available nids, 2924 * since later we will add it again. 2925 */ 2926 if (!get_nat_flag(ne, IS_DIRTY) && 2927 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) { 2928 spin_lock(&nm_i->nid_list_lock); 2929 nm_i->available_nids--; 2930 spin_unlock(&nm_i->nid_list_lock); 2931 } 2932 2933 __set_nat_cache_dirty(nm_i, ne); 2934 } 2935 update_nats_in_cursum(journal, -i); 2936 up_write(&curseg->journal_rwsem); 2937 } 2938 2939 static void __adjust_nat_entry_set(struct nat_entry_set *nes, 2940 struct list_head *head, int max) 2941 { 2942 struct nat_entry_set *cur; 2943 2944 if (nes->entry_cnt >= max) 2945 goto add_out; 2946 2947 list_for_each_entry(cur, head, set_list) { 2948 if (cur->entry_cnt >= nes->entry_cnt) { 2949 list_add(&nes->set_list, cur->set_list.prev); 2950 return; 2951 } 2952 } 2953 add_out: 2954 list_add_tail(&nes->set_list, head); 2955 } 2956 2957 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs, 2958 unsigned int valid) 2959 { 2960 if (valid == 0) { 2961 __set_bit_le(nat_ofs, nm_i->empty_nat_bits); 2962 __clear_bit_le(nat_ofs, nm_i->full_nat_bits); 2963 return; 2964 } 2965 2966 __clear_bit_le(nat_ofs, nm_i->empty_nat_bits); 2967 if (valid == NAT_ENTRY_PER_BLOCK) 2968 __set_bit_le(nat_ofs, nm_i->full_nat_bits); 2969 else 2970 __clear_bit_le(nat_ofs, nm_i->full_nat_bits); 2971 } 2972 2973 static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, 2974 struct page *page) 2975 { 2976 struct f2fs_nm_info *nm_i = NM_I(sbi); 2977 unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; 2978 struct f2fs_nat_block *nat_blk = page_address(page); 2979 int valid = 0; 2980 int i = 0; 2981 2982 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) 2983 return; 2984 2985 if (nat_index == 0) { 2986 valid = 1; 2987 i = 1; 2988 } 2989 for (; i < NAT_ENTRY_PER_BLOCK; i++) { 2990 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR) 2991 valid++; 2992 } 2993 2994 __update_nat_bits(nm_i, nat_index, valid); 2995 } 2996 2997 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi) 2998 { 2999 struct f2fs_nm_info *nm_i = NM_I(sbi); 3000 unsigned int nat_ofs; 3001 3002 f2fs_down_read(&nm_i->nat_tree_lock); 3003 3004 for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) { 3005 unsigned int valid = 0, nid_ofs = 0; 3006 3007 /* handle nid zero since it should never be used */ 3008 if (unlikely(nat_ofs == 0)) { 3009 valid = 1; 3010 nid_ofs = 1; 3011 } 3012 3013 for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) { 3014 if (!test_bit_le(nid_ofs, 3015 nm_i->free_nid_bitmap[nat_ofs])) 3016 valid++; 3017 } 3018 3019
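/* valid counts the in-use nids of this block: 0 classifies it as empty, NAT_ENTRY_PER_BLOCK as full, anything else as partial */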
__update_nat_bits(nm_i, nat_ofs, valid); 3020 } 3021 3022 f2fs_up_read(&nm_i->nat_tree_lock); 3023 } 3024 3025 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, 3026 struct nat_entry_set *set, struct cp_control *cpc) 3027 { 3028 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 3029 struct f2fs_journal *journal = curseg->journal; 3030 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK; 3031 bool to_journal = true; 3032 struct f2fs_nat_block *nat_blk; 3033 struct nat_entry *ne, *cur; 3034 struct page *page = NULL; 3035 3036 /* 3037 * there are two ways to flush nat entries: 3038 * #1, flush them to the journal in the current hot data summary block. 3039 * #2, flush them to the nat page. 3040 */ 3041 if ((cpc->reason & CP_UMOUNT) || 3042 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) 3043 to_journal = false; 3044 3045 if (to_journal) { 3046 down_write(&curseg->journal_rwsem); 3047 } else { 3048 page = get_next_nat_page(sbi, start_nid); 3049 if (IS_ERR(page)) 3050 return PTR_ERR(page); 3051 3052 nat_blk = page_address(page); 3053 f2fs_bug_on(sbi, !nat_blk); 3054 } 3055 3056 /* flush dirty nats in nat entry set */ 3057 list_for_each_entry_safe(ne, cur, &set->entry_list, list) { 3058 struct f2fs_nat_entry *raw_ne; 3059 nid_t nid = nat_get_nid(ne); 3060 int offset; 3061 3062 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR); 3063 3064 if (to_journal) { 3065 offset = f2fs_lookup_journal_in_cursum(journal, 3066 NAT_JOURNAL, nid, 1); 3067 f2fs_bug_on(sbi, offset < 0); 3068 raw_ne = &nat_in_journal(journal, offset); 3069 nid_in_journal(journal, offset) = cpu_to_le32(nid); 3070 } else { 3071 raw_ne = &nat_blk->entries[nid - start_nid]; 3072 } 3073 raw_nat_from_node_info(raw_ne, &ne->ni); 3074 nat_reset_flag(ne); 3075 __clear_nat_cache_dirty(NM_I(sbi), set, ne); 3076 if (nat_get_blkaddr(ne) == NULL_ADDR) { 3077 add_free_nid(sbi, nid, false, true); 3078 } else { 3079 spin_lock(&NM_I(sbi)->nid_list_lock); 3080 update_free_nid_bitmap(sbi, nid, false, false); 3081 spin_unlock(&NM_I(sbi)->nid_list_lock); 3082 } 3083 } 3084 3085 if (to_journal) { 3086 up_write(&curseg->journal_rwsem); 3087 } else { 3088 update_nat_bits(sbi, start_nid, page); 3089 f2fs_put_page(page, 1); 3090 } 3091 3092 /* Allow dirty nats by node block allocation in write_begin */ 3093 if (!set->entry_cnt) { 3094 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); 3095 kmem_cache_free(nat_entry_set_slab, set); 3096 } 3097 return 0; 3098 } 3099 3100 /* 3101 * This function is called during the checkpointing process. 3102 */ 3103 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) 3104 { 3105 struct f2fs_nm_info *nm_i = NM_I(sbi); 3106 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 3107 struct f2fs_journal *journal = curseg->journal; 3108 struct nat_entry_set *setvec[NAT_VEC_SIZE]; 3109 struct nat_entry_set *set, *tmp; 3110 unsigned int found; 3111 nid_t set_idx = 0; 3112 LIST_HEAD(sets); 3113 int err = 0; 3114 3115 /* 3116 * during unmount, let's flush nat_bits before checking 3117 * nat_cnt[DIRTY_NAT]. 3118 */ 3119 if (cpc->reason & CP_UMOUNT) { 3120 f2fs_down_write(&nm_i->nat_tree_lock); 3121 remove_nats_in_journal(sbi); 3122 f2fs_up_write(&nm_i->nat_tree_lock); 3123 } 3124 3125 if (!nm_i->nat_cnt[DIRTY_NAT]) 3126 return 0; 3127 3128 f2fs_down_write(&nm_i->nat_tree_lock); 3129 3130 /* 3131 * if there is not enough space in the journal to store dirty nat 3132 * entries, remove all entries from the journal and merge them 3133 * into the nat entry set.
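 * The merged entries are then written back per NAT block by __flush_nat_entry_set() below.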
3134 */ 3135 if (cpc->reason & CP_UMOUNT || 3136 !__has_cursum_space(journal, 3137 nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL)) 3138 remove_nats_in_journal(sbi); 3139 3140 while ((found = __gang_lookup_nat_set(nm_i, 3141 set_idx, NAT_VEC_SIZE, setvec))) { 3142 unsigned idx; 3143 3144 set_idx = setvec[found - 1]->set + 1; 3145 for (idx = 0; idx < found; idx++) 3146 __adjust_nat_entry_set(setvec[idx], &sets, 3147 MAX_NAT_JENTRIES(journal)); 3148 } 3149 3150 /* flush dirty nats in nat entry set */ 3151 list_for_each_entry_safe(set, tmp, &sets, set_list) { 3152 err = __flush_nat_entry_set(sbi, set, cpc); 3153 if (err) 3154 break; 3155 } 3156 3157 f2fs_up_write(&nm_i->nat_tree_lock); 3158 /* Allow dirty nats by node block allocation in write_begin */ 3159 3160 return err; 3161 } 3162 3163 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) 3164 { 3165 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 3166 struct f2fs_nm_info *nm_i = NM_I(sbi); 3167 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; 3168 unsigned int i; 3169 __u64 cp_ver = cur_cp_version(ckpt); 3170 block_t nat_bits_addr; 3171 3172 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); 3173 nm_i->nat_bits = f2fs_kvzalloc(sbi, 3174 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); 3175 if (!nm_i->nat_bits) 3176 return -ENOMEM; 3177 3178 nm_i->full_nat_bits = nm_i->nat_bits + 8; 3179 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; 3180 3181 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) 3182 return 0; 3183 3184 nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) - 3185 nm_i->nat_bits_blocks; 3186 for (i = 0; i < nm_i->nat_bits_blocks; i++) { 3187 struct page *page; 3188 3189 page = f2fs_get_meta_page(sbi, nat_bits_addr++); 3190 if (IS_ERR(page)) 3191 return PTR_ERR(page); 3192 3193 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), 3194 page_address(page), F2FS_BLKSIZE); 3195 f2fs_put_page(page, 1); 3196 } 3197 3198 cp_ver |= (cur_cp_crc(ckpt) << 32); 3199 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { 3200 clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG); 3201 f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)", 3202 cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits)); 3203 return 0; 3204 } 3205 3206 f2fs_notice(sbi, "Found nat_bits in checkpoint"); 3207 return 0; 3208 } 3209 3210 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) 3211 { 3212 struct f2fs_nm_info *nm_i = NM_I(sbi); 3213 unsigned int i = 0; 3214 nid_t nid, last_nid; 3215 3216 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) 3217 return; 3218 3219 for (i = 0; i < nm_i->nat_blocks; i++) { 3220 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); 3221 if (i >= nm_i->nat_blocks) 3222 break; 3223 3224 __set_bit_le(i, nm_i->nat_block_bitmap); 3225 3226 nid = i * NAT_ENTRY_PER_BLOCK; 3227 last_nid = nid + NAT_ENTRY_PER_BLOCK; 3228 3229 spin_lock(&NM_I(sbi)->nid_list_lock); 3230 for (; nid < last_nid; nid++) 3231 update_free_nid_bitmap(sbi, nid, true, true); 3232 spin_unlock(&NM_I(sbi)->nid_list_lock); 3233 } 3234 3235 for (i = 0; i < nm_i->nat_blocks; i++) { 3236 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); 3237 if (i >= nm_i->nat_blocks) 3238 break; 3239 3240 __set_bit_le(i, nm_i->nat_block_bitmap); 3241 } 3242 } 3243 3244 static int init_node_manager(struct f2fs_sb_info *sbi) 3245 { 3246 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); 3247 struct f2fs_nm_info *nm_i = NM_I(sbi); 3248 unsigned char *version_bitmap; 3249 unsigned int nat_segs; 3250 int err; 3251 
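/* the NAT geometry below comes straight from the raw superblock */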
3252 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); 3253 3254 /* segment_count_nat includes pair segments, so divide by 2. */ 3255 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; 3256 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); 3257 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; 3258 3259 /* reserved nids: 0, node, meta (root is already counted as a valid node) */ 3260 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - 3261 F2FS_RESERVED_NODE_NUM; 3262 nm_i->nid_cnt[FREE_NID] = 0; 3263 nm_i->nid_cnt[PREALLOC_NID] = 0; 3264 nm_i->ram_thresh = DEF_RAM_THRESHOLD; 3265 nm_i->ra_nid_pages = DEF_RA_NID_PAGES; 3266 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; 3267 nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS; 3268 3269 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); 3270 INIT_LIST_HEAD(&nm_i->free_nid_list); 3271 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); 3272 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); 3273 INIT_LIST_HEAD(&nm_i->nat_entries); 3274 spin_lock_init(&nm_i->nat_list_lock); 3275 3276 mutex_init(&nm_i->build_lock); 3277 spin_lock_init(&nm_i->nid_list_lock); 3278 init_f2fs_rwsem(&nm_i->nat_tree_lock); 3279 3280 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); 3281 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); 3282 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); 3283 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, 3284 GFP_KERNEL); 3285 if (!nm_i->nat_bitmap) 3286 return -ENOMEM; 3287 3288 err = __get_nat_bitmaps(sbi); 3289 if (err) 3290 return err; 3291 3292 #ifdef CONFIG_F2FS_CHECK_FS 3293 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size, 3294 GFP_KERNEL); 3295 if (!nm_i->nat_bitmap_mir) 3296 return -ENOMEM; 3297 #endif 3298 3299 return 0; 3300 } 3301 3302 static int init_free_nid_cache(struct f2fs_sb_info *sbi) 3303 { 3304 struct f2fs_nm_info *nm_i = NM_I(sbi); 3305 int i; 3306 3307 nm_i->free_nid_bitmap = 3308 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), 3309 nm_i->nat_blocks), 3310 GFP_KERNEL); 3311 if (!nm_i->free_nid_bitmap) 3312 return -ENOMEM; 3313 3314 for (i = 0; i < nm_i->nat_blocks; i++) { 3315 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi, 3316 f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL); 3317 if (!nm_i->free_nid_bitmap[i]) 3318 return -ENOMEM; 3319 } 3320 3321 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8, 3322 GFP_KERNEL); 3323 if (!nm_i->nat_block_bitmap) 3324 return -ENOMEM; 3325 3326 nm_i->free_nid_count = 3327 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short), 3328 nm_i->nat_blocks), 3329 GFP_KERNEL); 3330 if (!nm_i->free_nid_count) 3331 return -ENOMEM; 3332 return 0; 3333 } 3334 3335 int f2fs_build_node_manager(struct f2fs_sb_info *sbi) 3336 { 3337 int err; 3338 3339 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info), 3340 GFP_KERNEL); 3341 if (!sbi->nm_info) 3342 return -ENOMEM; 3343 3344 err = init_node_manager(sbi); 3345 if (err) 3346 return err; 3347 3348 err = init_free_nid_cache(sbi); 3349 if (err) 3350 return err; 3351 3352 /* load free nid status from nat_bits table */ 3353 load_free_nid_bitmap(sbi); 3354 3355 return f2fs_build_free_nids(sbi, true, true); 3356 } 3357 3358 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) 3359 { 3360 struct f2fs_nm_info *nm_i = NM_I(sbi); 3361 struct free_nid *i, *next_i; 3362 void *vec[NAT_VEC_SIZE]; 3363 struct nat_entry **natvec = (struct nat_entry **)vec; 3364 struct nat_entry_set **setvec = (struct nat_entry_set **)vec; 3365 nid_t nid = 0;
3366 unsigned int found; 3367 3368 if (!nm_i) 3369 return; 3370 3371 /* destroy free nid list */ 3372 spin_lock(&nm_i->nid_list_lock); 3373 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { 3374 __remove_free_nid(sbi, i, FREE_NID); 3375 spin_unlock(&nm_i->nid_list_lock); 3376 kmem_cache_free(free_nid_slab, i); 3377 spin_lock(&nm_i->nid_list_lock); 3378 } 3379 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); 3380 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); 3381 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); 3382 spin_unlock(&nm_i->nid_list_lock); 3383 3384 /* destroy nat cache */ 3385 f2fs_down_write(&nm_i->nat_tree_lock); 3386 while ((found = __gang_lookup_nat_cache(nm_i, 3387 nid, NAT_VEC_SIZE, natvec))) { 3388 unsigned idx; 3389 3390 nid = nat_get_nid(natvec[found - 1]) + 1; 3391 for (idx = 0; idx < found; idx++) { 3392 spin_lock(&nm_i->nat_list_lock); 3393 list_del(&natvec[idx]->list); 3394 spin_unlock(&nm_i->nat_list_lock); 3395 3396 __del_from_nat_cache(nm_i, natvec[idx]); 3397 } 3398 } 3399 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); 3400 3401 /* destroy nat set cache */ 3402 nid = 0; 3403 memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE); 3404 while ((found = __gang_lookup_nat_set(nm_i, 3405 nid, NAT_VEC_SIZE, setvec))) { 3406 unsigned idx; 3407 3408 nid = setvec[found - 1]->set + 1; 3409 for (idx = 0; idx < found; idx++) { 3410 /* entry_cnt is not zero when a cp_error has occurred */ 3411 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); 3412 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); 3413 kmem_cache_free(nat_entry_set_slab, setvec[idx]); 3414 } 3415 } 3416 f2fs_up_write(&nm_i->nat_tree_lock); 3417 3418 kvfree(nm_i->nat_block_bitmap); 3419 if (nm_i->free_nid_bitmap) { 3420 int i; 3421 3422 for (i = 0; i < nm_i->nat_blocks; i++) 3423 kvfree(nm_i->free_nid_bitmap[i]); 3424 kvfree(nm_i->free_nid_bitmap); 3425 } 3426 kvfree(nm_i->free_nid_count); 3427 3428 kvfree(nm_i->nat_bitmap); 3429 kvfree(nm_i->nat_bits); 3430 #ifdef CONFIG_F2FS_CHECK_FS 3431 kvfree(nm_i->nat_bitmap_mir); 3432 #endif 3433 sbi->nm_info = NULL; 3434 kfree(nm_i); 3435 } 3436 3437 int __init f2fs_create_node_manager_caches(void) 3438 { 3439 nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry", 3440 sizeof(struct nat_entry)); 3441 if (!nat_entry_slab) 3442 goto fail; 3443 3444 free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid", 3445 sizeof(struct free_nid)); 3446 if (!free_nid_slab) 3447 goto destroy_nat_entry; 3448 3449 nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set", 3450 sizeof(struct nat_entry_set)); 3451 if (!nat_entry_set_slab) 3452 goto destroy_free_nid; 3453 3454 fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry", 3455 sizeof(struct fsync_node_entry)); 3456 if (!fsync_node_entry_slab) 3457 goto destroy_nat_entry_set; 3458 return 0; 3459 3460 destroy_nat_entry_set: 3461 kmem_cache_destroy(nat_entry_set_slab); 3462 destroy_free_nid: 3463 kmem_cache_destroy(free_nid_slab); 3464 destroy_nat_entry: 3465 kmem_cache_destroy(nat_entry_slab); 3466 fail: 3467 return -ENOMEM; 3468 } 3469 3470 void f2fs_destroy_node_manager_caches(void) 3471 { 3472 kmem_cache_destroy(fsync_node_entry_slab); 3473 kmem_cache_destroy(nat_entry_set_slab); 3474 kmem_cache_destroy(free_nid_slab); 3475 kmem_cache_destroy(nat_entry_slab); 3476 } 3477