/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/* give 25%, 25%, 50%, 50% of memory to each component, respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

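	/* dst_page is returned locked and dirty; it is written back as meta */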
	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;
retry:
	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;

		if (radix_tree_insert(&nm_i->nat_set_root, set, head)) {
			kmem_cache_free(nat_entry_set_slab, head);
			goto retry;
		}
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool has_fsynced_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsynced = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_FSYNCED_INODE))
		fsynced = true;
	up_read(&nm_i->nat_tree_lock);
	return fsynced;
}

bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			up_write(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&e->ni, ne);
	}
	up_write(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			up_write(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		e->ni = *ni;
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	down_write(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	up_read(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set to ALLOC_NODE; read-only lookups
 * do not need to care about the rwsem.
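 *
 * mode is one of ALLOC_NODE (allocate missing node blocks on the path),
 * LOOKUP_NODE (fail with -ENOENT on a hole) and LOOKUP_NODE_RA (lookup
 * with readahead of sibling node pages at the deepest level).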
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Set up dnode_of_data for the following truncation calls */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
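 *
 * This truncates the xattr node and any remaining data blocks, then frees
 * the inode page itself via truncate_node().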
 */
void remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
		return;

	if (truncate_xattr_node(inode, dn.inode_page)) {
		f2fs_put_dnode(&dn);
		return;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should take action according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	return page;
}

/*
 * Return a locked page for the desired node page.
 * Also, readahead MAX_RA_NODE node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/*
	 * First, try getting the desired direct node.
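	 * If it is already uptodate in the page cache, read_node_page()
	 * returns LOCKED_PAGE and the sibling readahead below is skipped.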
	 */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
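			 * So lock_page() is used for the target inode's
			 * pages, instead of the best-effort trylock below.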
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (!is_checkpointed_node(sbi, ino) &&
						!has_fsynced_inode(sbi, ino))
						set_dentry_mark(page, 1);
					else
						set_dentry_mark(page, 0);
				}
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write them together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	if (PageDirty(page))
		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		down_read(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		up_read(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next nat page in order to find more free nids */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and their caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
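 * This returns the nid to the free list, or frees it immediately when
 * the free nid cache is over its memory threshold.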
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are allocated in bd_inode's mapping tree.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
				int start, int nrpages)
{
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	struct address_space *mapping = inode->i_mapping;
	int i, page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
		/* alloc page in bd_inode for reading node summary info */
		pages[i] = grab_cache_page(mapping, page_idx);
		if (!pages[i])
			break;
		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return i;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	struct page *pages[bio_blocks];
	int i, idx, last_offset, nrpages, err = 0;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		for (idx = 0; idx < nrpages; idx++) {
			if (err)
				goto skip;

			lock_page(pages[idx]);
			if (unlikely(!PageUptodate(pages[idx]))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(pages[idx]);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(pages[idx]);
skip:
			page_cache_release(pages[idx]);
		}

		invalidate_mapping_pages(inode->i_mapping, addr,
							addr + nrpages);
	}
	return err;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		down_write(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne)
			goto found;

		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			up_write(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&ne->ni, &raw_ne);
found:
		__set_nat_cache_dirty(nm_i, ne);
		up_write(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		mutex_lock(&curseg->curseg_mutex);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(sum, offset);
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);

		down_write(&NM_I(sbi)->nat_tree_lock);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		up_write(&NM_I(sbi)->nat_tree_lock);

		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		mutex_unlock(&curseg->curseg_mutex);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	kmem_cache_free(nat_entry_set_slab, set);
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *setvec[NATVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;
	/*
	 * if there is not enough space in the journal to store all the
	 * dirty nat entries, remove all entries from the journal and merge
	 * them into the nat entry set.
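	 * Sets are then flushed in ascending order of entry count, so the
	 * small sets consume the remaining journal space first.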
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NATVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
							MAX_NAT_JENTRIES(sum));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment, so divide it by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* unused nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
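	/* every cached nat entry must have been dropped by now */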
	f2fs_bug_on(sbi, nm_i->nat_cnt);
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}