/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component, respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
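/*
 * On disk, each NAT block exists in two copies; one bit per block in the
 * NAT version bitmap selects which copy is currently valid.  The helper
 * below copies the valid block into the shadow location and flips that
 * bit, so the coming checkpoint writes the updated block without touching
 * the copy the previous checkpoint still relies on.
 */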
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}
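/*
 * A short note on the nat flags used by the helpers around here; the
 * meanings below are inferred from how this file sets them:
 *  IS_CHECKPOINTED   - the cached address was covered by the last checkpoint
 *  HAS_FSYNCED_INODE - the inode itself was written out by the last fsync
 *  HAS_LAST_FSYNC    - the latest node write of this inode came from fsync
 */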
bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;

	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	}
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));

		/* in order to reuse the nid */
		if (nm_i->next_scan_nid > ni->nid)
			nm_i->next_scan_nid = ni->nid;
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
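/*
 * get_node_info() below resolves a nid in three stages, cheapest first:
 * the in-memory nat cache, then the NAT journal kept in the hot data
 * summary block, and finally the on-disk NAT block.  Whatever the slower
 * paths find is inserted into the cache before returning.
 */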
/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	up_read(&nm_i->nat_tree_lock);
	if (e)
		return;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	down_write(&nm_i->nat_tree_lock);

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
	up_write(&nm_i->nat_tree_lock);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
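/*
 * A worked example for get_node_path() above, assuming the usual 4KB
 * block layout where ADDRS_PER_INODE() is 923 (no inline xattrs) and
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018 (check f2fs.h for the
 * authoritative values):
 *
 *   block 100                      -> level 0, stored in the inode itself
 *   block 923                      -> level 1, NODE_DIR1_BLOCK, offset 0
 *   block 923 + 2 * 1018           -> level 2, NODE_IND1_BLOCK
 *   block 923 + 2 * 1018 + 2 * 1018 * 1018
 *                                  -> level 3, NODE_DIND_BLOCK
 *
 * so the largest offset this tree can index is
 * 923 + 2 * 1018 + 2 * 1018^2 + 1018^3 blocks.
 */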
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE; read-only lookups
 * do not need to care about the lock.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
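/*
 * A minimal usage sketch for get_dnode_of_data() above (the index and
 * error handling are illustrative; remove_inode_page() below is a real
 * in-tree caller):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	... use dn.node_page and dn.data_blkaddr here ...
 *	f2fs_put_dnode(&dn);
 *
 * With ALLOC_NODE the call must additionally be wrapped in
 * f2fs_lock_op()/f2fs_unlock_op(), as the comment above states.
 */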
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
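/*
 * truncate_nodes() above reports progress through its return value: a
 * freed direct node counts as 1 and a fully freed (in)direct subtree as
 * NIDS_PER_BLOCK + 1, its children plus the node itself, which is exactly
 * the amount by which truncate_inode_blocks() advances its nofs cursor.
 */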
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = truncate_xattr_node(inode, dn.inode_page);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
	return 0;
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Caller should put the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = rw,
		.page = page,
		.encrypted_page = NULL,
	};

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	fio.blk_addr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	f2fs_put_page(apage, err ? 1 : 0);
}

/*
 * Readahead up to MAX_RA_NODE node pages.
 */
void ra_node_pages(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		goto page_hit;
	}

	if (parent)
		ra_node_pages(parent, start + 1);

	lock_page(page);

	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	f2fs_bug_on(sbi, nid != nid_of_node(page));
	return page;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}
void sync_inode_page(struct dnode_of_data *dn)
{
	int ret = 0;

	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		ret = update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		ret = update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		ret = update_inode_page(dn->inode);
	}
	dn->node_changed = ret ? true : false;
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				pagevec_release(&pvec);
				return -EIO;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page,
						need_dentry_mark(sbi, ino));
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}
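/*
 * Wait until writeback of every node page belonging to @ino has finished
 * and collect any write error.  The fsync path is expected to call this
 * after sync_node_pages() so the marked dnodes are durable before the
 * flush is issued.
 */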
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);
	fio.blk_addr = ni.blk_addr;
	write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);

	if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};
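/*
 * Free nid management: nids whose NAT entries show NULL_ADDR are cached
 * on nm_i->free_nid_list (indexed by free_nid_root) in state NID_NEW.
 * alloc_nid() moves an entry to NID_ALLOC, and alloc_nid_done() or
 * alloc_nid_failed() later retires it or returns it to the pool.
 */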
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
								nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}
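/*
 * build_free_nids() below scans up to FREE_NID_PAGES NAT blocks starting
 * at next_scan_nid, after issuing readahead for them, and then corrects
 * the result against the NAT journal: a journalled entry with NULL_ADDR
 * is a free nid, while any other address must leave the free list.
 */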
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
	up_read(&nm_i->nat_tree_lock);

	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}
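/*
 * A minimal sketch of the nid allocation protocol (get_dnode_of_data()
 * follows exactly this pattern when it allocates node pages):
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	dn->nid = nid;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	- nid returns to the pool
 *	else
 *		alloc_nid_done(sbi, nid);	- nid is consumed for good
 */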
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next;
	int nr = nr_shrink;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
		if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
			break;
		if (i->state == NID_ALLOC)
			continue;
		__del_from_free_nid_list(nm_i, i);
		kmem_cache_free(free_nid_slab, i);
		nm_i->fcnt--;
		nr_shrink--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = get_tmp_page(sbi, idx);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = grab_nat_entry(nm_i, nid);
			node_info_from_raw_nat(&ne->ni, &raw_ne);
		}
		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}
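/*
 * __adjust_nat_entry_set() keeps the list of dirty sets sorted by entry
 * count, smallest first, with any set too large for the journal placed at
 * the tail.  Flushing in that order lets small sets consume the remaining
 * NAT journal space while the big ones go straight to NAT blocks.
 */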
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		mutex_lock(&curseg->curseg_mutex);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(sum, offset);
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		mutex_unlock(&curseg->curseg_mutex);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	kmem_cache_free(nat_entry_set_slab, set);
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into nat entry sets.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
							MAX_NAT_JENTRIES(sum));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	up_write(&nm_i->nat_tree_lock);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
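/*
 * Rough sizing, assuming 4KB blocks and the default 2MB segments (512
 * blocks each): a struct f2fs_nat_entry is 9 bytes on disk, so
 * NAT_ENTRY_PER_BLOCK is 4096 / 9 = 455 and one NAT segment pair maps
 * 455 * 512 = 232,960 nids; max_nid below scales linearly with
 * segment_count_nat / 2.
 */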
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* unused nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero if a cp_error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}