1 /* 2 * fs/f2fs/node.c 3 * 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com/ 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/fs.h> 12 #include <linux/f2fs_fs.h> 13 #include <linux/mpage.h> 14 #include <linux/backing-dev.h> 15 #include <linux/blkdev.h> 16 #include <linux/pagevec.h> 17 #include <linux/swap.h> 18 19 #include "f2fs.h" 20 #include "node.h" 21 #include "segment.h" 22 #include <trace/events/f2fs.h> 23 24 #define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock) 25 26 static struct kmem_cache *nat_entry_slab; 27 static struct kmem_cache *free_nid_slab; 28 static struct kmem_cache *nat_entry_set_slab; 29 30 bool available_free_memory(struct f2fs_sb_info *sbi, int type) 31 { 32 struct f2fs_nm_info *nm_i = NM_I(sbi); 33 struct sysinfo val; 34 unsigned long mem_size = 0; 35 bool res = false; 36 37 si_meminfo(&val); 38 /* give 25%, 25%, 50% memory for each components respectively */ 39 if (type == FREE_NIDS) { 40 mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12; 41 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); 42 } else if (type == NAT_ENTRIES) { 43 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12; 44 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); 45 } else if (type == DIRTY_DENTS) { 46 if (sbi->sb->s_bdi->dirty_exceeded) 47 return false; 48 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); 49 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1); 50 } 51 return res; 52 } 53 54 static void clear_node_page_dirty(struct page *page) 55 { 56 struct address_space *mapping = page->mapping; 57 struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); 58 unsigned int long flags; 59 60 if (PageDirty(page)) { 61 spin_lock_irqsave(&mapping->tree_lock, flags); 62 radix_tree_tag_clear(&mapping->page_tree, 63 page_index(page), 64 PAGECACHE_TAG_DIRTY); 65 spin_unlock_irqrestore(&mapping->tree_lock, flags); 66 67 clear_page_dirty_for_io(page); 68 dec_page_count(sbi, F2FS_DIRTY_NODES); 69 } 70 ClearPageUptodate(page); 71 } 72 73 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) 74 { 75 pgoff_t index = current_nat_addr(sbi, nid); 76 return get_meta_page(sbi, index); 77 } 78 79 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) 80 { 81 struct page *src_page; 82 struct page *dst_page; 83 pgoff_t src_off; 84 pgoff_t dst_off; 85 void *src_addr; 86 void *dst_addr; 87 struct f2fs_nm_info *nm_i = NM_I(sbi); 88 89 src_off = current_nat_addr(sbi, nid); 90 dst_off = next_nat_addr(sbi, src_off); 91 92 /* get current nat block page with lock */ 93 src_page = get_meta_page(sbi, src_off); 94 dst_page = grab_meta_page(sbi, dst_off); 95 f2fs_bug_on(PageDirty(src_page)); 96 97 src_addr = page_address(src_page); 98 dst_addr = page_address(dst_page); 99 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); 100 set_page_dirty(dst_page); 101 f2fs_put_page(src_page, 1); 102 103 set_to_next_nat(nm_i, nid); 104 105 return dst_page; 106 } 107 108 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) 109 { 110 return radix_tree_lookup(&nm_i->nat_root, n); 111 } 112 113 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, 114 nid_t start, unsigned int nr, struct nat_entry **ep) 115 { 116 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); 117 } 118 119 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) 120 { 121 list_del(&e->list); 122 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); 123 nm_i->nat_cnt--; 124 kmem_cache_free(nat_entry_slab, e); 125 } 126 127 int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) 128 { 129 struct f2fs_nm_info *nm_i = NM_I(sbi); 130 struct nat_entry *e; 131 int is_cp = 1; 132 133 read_lock(&nm_i->nat_tree_lock); 134 e = __lookup_nat_cache(nm_i, nid); 135 if (e && !e->checkpointed) 136 is_cp = 0; 137 read_unlock(&nm_i->nat_tree_lock); 138 return is_cp; 139 } 140 141 bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid) 142 { 143 struct f2fs_nm_info *nm_i = NM_I(sbi); 144 struct nat_entry *e; 145 bool fsync_done = false; 146 147 read_lock(&nm_i->nat_tree_lock); 148 e = __lookup_nat_cache(nm_i, nid); 149 if (e) 150 fsync_done = e->fsync_done; 151 read_unlock(&nm_i->nat_tree_lock); 152 return fsync_done; 153 } 154 155 void fsync_mark_clear(struct f2fs_sb_info *sbi, nid_t nid) 156 { 157 struct f2fs_nm_info *nm_i = NM_I(sbi); 158 struct nat_entry *e; 159 160 write_lock(&nm_i->nat_tree_lock); 161 e = __lookup_nat_cache(nm_i, nid); 162 if (e) 163 e->fsync_done = false; 164 write_unlock(&nm_i->nat_tree_lock); 165 } 166 167 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid) 168 { 169 struct nat_entry *new; 170 171 new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC); 172 if (!new) 173 return NULL; 174 if (radix_tree_insert(&nm_i->nat_root, nid, new)) { 175 kmem_cache_free(nat_entry_slab, new); 176 return NULL; 177 } 178 memset(new, 0, sizeof(struct nat_entry)); 179 nat_set_nid(new, nid); 180 new->checkpointed = true; 181 list_add_tail(&new->list, &nm_i->nat_entries); 182 nm_i->nat_cnt++; 183 return new; 184 } 185 186 static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, 187 struct f2fs_nat_entry *ne) 188 { 189 struct nat_entry *e; 190 retry: 191 write_lock(&nm_i->nat_tree_lock); 192 e = __lookup_nat_cache(nm_i, nid); 193 if (!e) { 194 e = grab_nat_entry(nm_i, nid); 195 if (!e) { 196 write_unlock(&nm_i->nat_tree_lock); 197 goto retry; 198 } 199 node_info_from_raw_nat(&e->ni, ne); 200 } 201 write_unlock(&nm_i->nat_tree_lock); 202 } 203 204 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, 205 block_t new_blkaddr, bool fsync_done) 206 { 207 struct f2fs_nm_info *nm_i = NM_I(sbi); 208 struct nat_entry *e; 209 retry: 210 write_lock(&nm_i->nat_tree_lock); 211 e = __lookup_nat_cache(nm_i, ni->nid); 212 if (!e) { 213 e = grab_nat_entry(nm_i, ni->nid); 214 if (!e) { 215 write_unlock(&nm_i->nat_tree_lock); 216 goto retry; 217 } 218 e->ni = *ni; 219 f2fs_bug_on(ni->blk_addr == NEW_ADDR); 220 } else if (new_blkaddr == NEW_ADDR) { 221 /* 222 * when nid is reallocated, 223 * previous nat entry can be remained in nat cache. 224 * So, reinitialize it with new information. 225 */ 226 e->ni = *ni; 227 f2fs_bug_on(ni->blk_addr != NULL_ADDR); 228 } 229 230 /* sanity check */ 231 f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr); 232 f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR && 233 new_blkaddr == NULL_ADDR); 234 f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR && 235 new_blkaddr == NEW_ADDR); 236 f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR && 237 nat_get_blkaddr(e) != NULL_ADDR && 238 new_blkaddr == NEW_ADDR); 239 240 /* increment version no as node is removed */ 241 if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { 242 unsigned char version = nat_get_version(e); 243 nat_set_version(e, inc_node_version(version)); 244 } 245 246 /* change address */ 247 nat_set_blkaddr(e, new_blkaddr); 248 __set_nat_cache_dirty(nm_i, e); 249 250 /* update fsync_mark if its inode nat entry is still alive */ 251 e = __lookup_nat_cache(nm_i, ni->ino); 252 if (e) 253 e->fsync_done = fsync_done; 254 write_unlock(&nm_i->nat_tree_lock); 255 } 256 257 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) 258 { 259 struct f2fs_nm_info *nm_i = NM_I(sbi); 260 261 if (available_free_memory(sbi, NAT_ENTRIES)) 262 return 0; 263 264 write_lock(&nm_i->nat_tree_lock); 265 while (nr_shrink && !list_empty(&nm_i->nat_entries)) { 266 struct nat_entry *ne; 267 ne = list_first_entry(&nm_i->nat_entries, 268 struct nat_entry, list); 269 __del_from_nat_cache(nm_i, ne); 270 nr_shrink--; 271 } 272 write_unlock(&nm_i->nat_tree_lock); 273 return nr_shrink; 274 } 275 276 /* 277 * This function always returns success 278 */ 279 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) 280 { 281 struct f2fs_nm_info *nm_i = NM_I(sbi); 282 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 283 struct f2fs_summary_block *sum = curseg->sum_blk; 284 nid_t start_nid = START_NID(nid); 285 struct f2fs_nat_block *nat_blk; 286 struct page *page = NULL; 287 struct f2fs_nat_entry ne; 288 struct nat_entry *e; 289 int i; 290 291 memset(&ne, 0, sizeof(struct f2fs_nat_entry)); 292 ni->nid = nid; 293 294 /* Check nat cache */ 295 read_lock(&nm_i->nat_tree_lock); 296 e = __lookup_nat_cache(nm_i, nid); 297 if (e) { 298 ni->ino = nat_get_ino(e); 299 ni->blk_addr = nat_get_blkaddr(e); 300 ni->version = nat_get_version(e); 301 } 302 read_unlock(&nm_i->nat_tree_lock); 303 if (e) 304 return; 305 306 /* Check current segment summary */ 307 mutex_lock(&curseg->curseg_mutex); 308 i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0); 309 if (i >= 0) { 310 ne = nat_in_journal(sum, i); 311 node_info_from_raw_nat(ni, &ne); 312 } 313 mutex_unlock(&curseg->curseg_mutex); 314 if (i >= 0) 315 goto cache; 316 317 /* Fill node_info from nat page */ 318 page = get_current_nat_page(sbi, start_nid); 319 nat_blk = (struct f2fs_nat_block *)page_address(page); 320 ne = nat_blk->entries[nid - start_nid]; 321 node_info_from_raw_nat(ni, &ne); 322 f2fs_put_page(page, 1); 323 cache: 324 /* cache nat entry */ 325 cache_nat_entry(NM_I(sbi), nid, &ne); 326 } 327 328 /* 329 * The maximum depth is four. 330 * Offset[0] will have raw inode offset. 331 */ 332 static int get_node_path(struct f2fs_inode_info *fi, long block, 333 int offset[4], unsigned int noffset[4]) 334 { 335 const long direct_index = ADDRS_PER_INODE(fi); 336 const long direct_blks = ADDRS_PER_BLOCK; 337 const long dptrs_per_blk = NIDS_PER_BLOCK; 338 const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK; 339 const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK; 340 int n = 0; 341 int level = 0; 342 343 noffset[0] = 0; 344 345 if (block < direct_index) { 346 offset[n] = block; 347 goto got; 348 } 349 block -= direct_index; 350 if (block < direct_blks) { 351 offset[n++] = NODE_DIR1_BLOCK; 352 noffset[n] = 1; 353 offset[n] = block; 354 level = 1; 355 goto got; 356 } 357 block -= direct_blks; 358 if (block < direct_blks) { 359 offset[n++] = NODE_DIR2_BLOCK; 360 noffset[n] = 2; 361 offset[n] = block; 362 level = 1; 363 goto got; 364 } 365 block -= direct_blks; 366 if (block < indirect_blks) { 367 offset[n++] = NODE_IND1_BLOCK; 368 noffset[n] = 3; 369 offset[n++] = block / direct_blks; 370 noffset[n] = 4 + offset[n - 1]; 371 offset[n] = block % direct_blks; 372 level = 2; 373 goto got; 374 } 375 block -= indirect_blks; 376 if (block < indirect_blks) { 377 offset[n++] = NODE_IND2_BLOCK; 378 noffset[n] = 4 + dptrs_per_blk; 379 offset[n++] = block / direct_blks; 380 noffset[n] = 5 + dptrs_per_blk + offset[n - 1]; 381 offset[n] = block % direct_blks; 382 level = 2; 383 goto got; 384 } 385 block -= indirect_blks; 386 if (block < dindirect_blks) { 387 offset[n++] = NODE_DIND_BLOCK; 388 noffset[n] = 5 + (dptrs_per_blk * 2); 389 offset[n++] = block / indirect_blks; 390 noffset[n] = 6 + (dptrs_per_blk * 2) + 391 offset[n - 1] * (dptrs_per_blk + 1); 392 offset[n++] = (block / direct_blks) % dptrs_per_blk; 393 noffset[n] = 7 + (dptrs_per_blk * 2) + 394 offset[n - 2] * (dptrs_per_blk + 1) + 395 offset[n - 1]; 396 offset[n] = block % direct_blks; 397 level = 3; 398 goto got; 399 } else { 400 BUG(); 401 } 402 got: 403 return level; 404 } 405 406 /* 407 * Caller should call f2fs_put_dnode(dn). 408 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and 409 * f2fs_unlock_op() only if ro is not set RDONLY_NODE. 410 * In the case of RDONLY_NODE, we don't need to care about mutex. 411 */ 412 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) 413 { 414 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); 415 struct page *npage[4]; 416 struct page *parent; 417 int offset[4]; 418 unsigned int noffset[4]; 419 nid_t nids[4]; 420 int level, i; 421 int err = 0; 422 423 level = get_node_path(F2FS_I(dn->inode), index, offset, noffset); 424 425 nids[0] = dn->inode->i_ino; 426 npage[0] = dn->inode_page; 427 428 if (!npage[0]) { 429 npage[0] = get_node_page(sbi, nids[0]); 430 if (IS_ERR(npage[0])) 431 return PTR_ERR(npage[0]); 432 } 433 parent = npage[0]; 434 if (level != 0) 435 nids[1] = get_nid(parent, offset[0], true); 436 dn->inode_page = npage[0]; 437 dn->inode_page_locked = true; 438 439 /* get indirect or direct nodes */ 440 for (i = 1; i <= level; i++) { 441 bool done = false; 442 443 if (!nids[i] && mode == ALLOC_NODE) { 444 /* alloc new node */ 445 if (!alloc_nid(sbi, &(nids[i]))) { 446 err = -ENOSPC; 447 goto release_pages; 448 } 449 450 dn->nid = nids[i]; 451 npage[i] = new_node_page(dn, noffset[i], NULL); 452 if (IS_ERR(npage[i])) { 453 alloc_nid_failed(sbi, nids[i]); 454 err = PTR_ERR(npage[i]); 455 goto release_pages; 456 } 457 458 set_nid(parent, offset[i - 1], nids[i], i == 1); 459 alloc_nid_done(sbi, nids[i]); 460 done = true; 461 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) { 462 npage[i] = get_node_page_ra(parent, offset[i - 1]); 463 if (IS_ERR(npage[i])) { 464 err = PTR_ERR(npage[i]); 465 goto release_pages; 466 } 467 done = true; 468 } 469 if (i == 1) { 470 dn->inode_page_locked = false; 471 unlock_page(parent); 472 } else { 473 f2fs_put_page(parent, 1); 474 } 475 476 if (!done) { 477 npage[i] = get_node_page(sbi, nids[i]); 478 if (IS_ERR(npage[i])) { 479 err = PTR_ERR(npage[i]); 480 f2fs_put_page(npage[0], 0); 481 goto release_out; 482 } 483 } 484 if (i < level) { 485 parent = npage[i]; 486 nids[i + 1] = get_nid(parent, offset[i], false); 487 } 488 } 489 dn->nid = nids[level]; 490 dn->ofs_in_node = offset[level]; 491 dn->node_page = npage[level]; 492 dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node); 493 return 0; 494 495 release_pages: 496 f2fs_put_page(parent, 1); 497 if (i > 1) 498 f2fs_put_page(npage[0], 0); 499 release_out: 500 dn->inode_page = NULL; 501 dn->node_page = NULL; 502 return err; 503 } 504 505 static void truncate_node(struct dnode_of_data *dn) 506 { 507 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); 508 struct node_info ni; 509 510 get_node_info(sbi, dn->nid, &ni); 511 if (dn->inode->i_blocks == 0) { 512 f2fs_bug_on(ni.blk_addr != NULL_ADDR); 513 goto invalidate; 514 } 515 f2fs_bug_on(ni.blk_addr == NULL_ADDR); 516 517 /* Deallocate node address */ 518 invalidate_blocks(sbi, ni.blk_addr); 519 dec_valid_node_count(sbi, dn->inode); 520 set_node_addr(sbi, &ni, NULL_ADDR, false); 521 522 if (dn->nid == dn->inode->i_ino) { 523 remove_orphan_inode(sbi, dn->nid); 524 dec_valid_inode_count(sbi); 525 } else { 526 sync_inode_page(dn); 527 } 528 invalidate: 529 clear_node_page_dirty(dn->node_page); 530 F2FS_SET_SB_DIRT(sbi); 531 532 f2fs_put_page(dn->node_page, 1); 533 534 invalidate_mapping_pages(NODE_MAPPING(sbi), 535 dn->node_page->index, dn->node_page->index); 536 537 dn->node_page = NULL; 538 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); 539 } 540 541 static int truncate_dnode(struct dnode_of_data *dn) 542 { 543 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); 544 struct page *page; 545 546 if (dn->nid == 0) 547 return 1; 548 549 /* get direct node */ 550 page = get_node_page(sbi, dn->nid); 551 if (IS_ERR(page) && PTR_ERR(page) == -ENOENT) 552 return 1; 553 else if (IS_ERR(page)) 554 return PTR_ERR(page); 555 556 /* Make dnode_of_data for parameter */ 557 dn->node_page = page; 558 dn->ofs_in_node = 0; 559 truncate_data_blocks(dn); 560 truncate_node(dn); 561 return 1; 562 } 563 564 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs, 565 int ofs, int depth) 566 { 567 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); 568 struct dnode_of_data rdn = *dn; 569 struct page *page; 570 struct f2fs_node *rn; 571 nid_t child_nid; 572 unsigned int child_nofs; 573 int freed = 0; 574 int i, ret; 575 576 if (dn->nid == 0) 577 return NIDS_PER_BLOCK + 1; 578 579 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); 580 581 page = get_node_page(sbi, dn->nid); 582 if (IS_ERR(page)) { 583 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page)); 584 return PTR_ERR(page); 585 } 586 587 rn = F2FS_NODE(page); 588 if (depth < 3) { 589 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) { 590 child_nid = le32_to_cpu(rn->in.nid[i]); 591 if (child_nid == 0) 592 continue; 593 rdn.nid = child_nid; 594 ret = truncate_dnode(&rdn); 595 if (ret < 0) 596 goto out_err; 597 set_nid(page, i, 0, false); 598 } 599 } else { 600 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1; 601 for (i = ofs; i < NIDS_PER_BLOCK; i++) { 602 child_nid = le32_to_cpu(rn->in.nid[i]); 603 if (child_nid == 0) { 604 child_nofs += NIDS_PER_BLOCK + 1; 605 continue; 606 } 607 rdn.nid = child_nid; 608 ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1); 609 if (ret == (NIDS_PER_BLOCK + 1)) { 610 set_nid(page, i, 0, false); 611 child_nofs += ret; 612 } else if (ret < 0 && ret != -ENOENT) { 613 goto out_err; 614 } 615 } 616 freed = child_nofs; 617 } 618 619 if (!ofs) { 620 /* remove current indirect node */ 621 dn->node_page = page; 622 truncate_node(dn); 623 freed++; 624 } else { 625 f2fs_put_page(page, 1); 626 } 627 trace_f2fs_truncate_nodes_exit(dn->inode, freed); 628 return freed; 629 630 out_err: 631 f2fs_put_page(page, 1); 632 trace_f2fs_truncate_nodes_exit(dn->inode, ret); 633 return ret; 634 } 635 636 static int truncate_partial_nodes(struct dnode_of_data *dn, 637 struct f2fs_inode *ri, int *offset, int depth) 638 { 639 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); 640 struct page *pages[2]; 641 nid_t nid[3]; 642 nid_t child_nid; 643 int err = 0; 644 int i; 645 int idx = depth - 2; 646 647 nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); 648 if (!nid[0]) 649 return 0; 650 651 /* get indirect nodes in the path */ 652 for (i = 0; i < idx + 1; i++) { 653 /* reference count'll be increased */ 654 pages[i] = get_node_page(sbi, nid[i]); 655 if (IS_ERR(pages[i])) { 656 err = PTR_ERR(pages[i]); 657 idx = i - 1; 658 goto fail; 659 } 660 nid[i + 1] = get_nid(pages[i], offset[i + 1], false); 661 } 662 663 /* free direct nodes linked to a partial indirect node */ 664 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { 665 child_nid = get_nid(pages[idx], i, false); 666 if (!child_nid) 667 continue; 668 dn->nid = child_nid; 669 err = truncate_dnode(dn); 670 if (err < 0) 671 goto fail; 672 set_nid(pages[idx], i, 0, false); 673 } 674 675 if (offset[idx + 1] == 0) { 676 dn->node_page = pages[idx]; 677 dn->nid = nid[idx]; 678 truncate_node(dn); 679 } else { 680 f2fs_put_page(pages[idx], 1); 681 } 682 offset[idx]++; 683 offset[idx + 1] = 0; 684 idx--; 685 fail: 686 for (i = idx; i >= 0; i--) 687 f2fs_put_page(pages[i], 1); 688 689 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); 690 691 return err; 692 } 693 694 /* 695 * All the block addresses of data and nodes should be nullified. 696 */ 697 int truncate_inode_blocks(struct inode *inode, pgoff_t from) 698 { 699 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 700 int err = 0, cont = 1; 701 int level, offset[4], noffset[4]; 702 unsigned int nofs = 0; 703 struct f2fs_inode *ri; 704 struct dnode_of_data dn; 705 struct page *page; 706 707 trace_f2fs_truncate_inode_blocks_enter(inode, from); 708 709 level = get_node_path(F2FS_I(inode), from, offset, noffset); 710 restart: 711 page = get_node_page(sbi, inode->i_ino); 712 if (IS_ERR(page)) { 713 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); 714 return PTR_ERR(page); 715 } 716 717 set_new_dnode(&dn, inode, page, NULL, 0); 718 unlock_page(page); 719 720 ri = F2FS_INODE(page); 721 switch (level) { 722 case 0: 723 case 1: 724 nofs = noffset[1]; 725 break; 726 case 2: 727 nofs = noffset[1]; 728 if (!offset[level - 1]) 729 goto skip_partial; 730 err = truncate_partial_nodes(&dn, ri, offset, level); 731 if (err < 0 && err != -ENOENT) 732 goto fail; 733 nofs += 1 + NIDS_PER_BLOCK; 734 break; 735 case 3: 736 nofs = 5 + 2 * NIDS_PER_BLOCK; 737 if (!offset[level - 1]) 738 goto skip_partial; 739 err = truncate_partial_nodes(&dn, ri, offset, level); 740 if (err < 0 && err != -ENOENT) 741 goto fail; 742 break; 743 default: 744 BUG(); 745 } 746 747 skip_partial: 748 while (cont) { 749 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); 750 switch (offset[0]) { 751 case NODE_DIR1_BLOCK: 752 case NODE_DIR2_BLOCK: 753 err = truncate_dnode(&dn); 754 break; 755 756 case NODE_IND1_BLOCK: 757 case NODE_IND2_BLOCK: 758 err = truncate_nodes(&dn, nofs, offset[1], 2); 759 break; 760 761 case NODE_DIND_BLOCK: 762 err = truncate_nodes(&dn, nofs, offset[1], 3); 763 cont = 0; 764 break; 765 766 default: 767 BUG(); 768 } 769 if (err < 0 && err != -ENOENT) 770 goto fail; 771 if (offset[1] == 0 && 772 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { 773 lock_page(page); 774 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 775 f2fs_put_page(page, 1); 776 goto restart; 777 } 778 f2fs_wait_on_page_writeback(page, NODE); 779 ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; 780 set_page_dirty(page); 781 unlock_page(page); 782 } 783 offset[1] = 0; 784 offset[0]++; 785 nofs += err; 786 } 787 fail: 788 f2fs_put_page(page, 0); 789 trace_f2fs_truncate_inode_blocks_exit(inode, err); 790 return err > 0 ? 0 : err; 791 } 792 793 int truncate_xattr_node(struct inode *inode, struct page *page) 794 { 795 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 796 nid_t nid = F2FS_I(inode)->i_xattr_nid; 797 struct dnode_of_data dn; 798 struct page *npage; 799 800 if (!nid) 801 return 0; 802 803 npage = get_node_page(sbi, nid); 804 if (IS_ERR(npage)) 805 return PTR_ERR(npage); 806 807 F2FS_I(inode)->i_xattr_nid = 0; 808 809 /* need to do checkpoint during fsync */ 810 F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi)); 811 812 set_new_dnode(&dn, inode, page, npage, nid); 813 814 if (page) 815 dn.inode_page_locked = true; 816 truncate_node(&dn); 817 return 0; 818 } 819 820 /* 821 * Caller should grab and release a rwsem by calling f2fs_lock_op() and 822 * f2fs_unlock_op(). 823 */ 824 void remove_inode_page(struct inode *inode) 825 { 826 struct dnode_of_data dn; 827 828 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); 829 if (get_dnode_of_data(&dn, 0, LOOKUP_NODE)) 830 return; 831 832 if (truncate_xattr_node(inode, dn.inode_page)) { 833 f2fs_put_dnode(&dn); 834 return; 835 } 836 837 /* remove potential inline_data blocks */ 838 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 839 S_ISLNK(inode->i_mode)) 840 truncate_data_blocks_range(&dn, 1); 841 842 /* 0 is possible, after f2fs_new_inode() has failed */ 843 f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1); 844 845 /* will put inode & node pages */ 846 truncate_node(&dn); 847 } 848 849 struct page *new_inode_page(struct inode *inode) 850 { 851 struct dnode_of_data dn; 852 853 /* allocate inode page for new inode */ 854 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); 855 856 /* caller should f2fs_put_page(page, 1); */ 857 return new_node_page(&dn, 0, NULL); 858 } 859 860 struct page *new_node_page(struct dnode_of_data *dn, 861 unsigned int ofs, struct page *ipage) 862 { 863 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); 864 struct node_info old_ni, new_ni; 865 struct page *page; 866 int err; 867 868 if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) 869 return ERR_PTR(-EPERM); 870 871 page = grab_cache_page(NODE_MAPPING(sbi), dn->nid); 872 if (!page) 873 return ERR_PTR(-ENOMEM); 874 875 if (unlikely(!inc_valid_node_count(sbi, dn->inode))) { 876 err = -ENOSPC; 877 goto fail; 878 } 879 880 get_node_info(sbi, dn->nid, &old_ni); 881 882 /* Reinitialize old_ni with new node page */ 883 f2fs_bug_on(old_ni.blk_addr != NULL_ADDR); 884 new_ni = old_ni; 885 new_ni.ino = dn->inode->i_ino; 886 set_node_addr(sbi, &new_ni, NEW_ADDR, false); 887 888 f2fs_wait_on_page_writeback(page, NODE); 889 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); 890 set_cold_node(dn->inode, page); 891 SetPageUptodate(page); 892 set_page_dirty(page); 893 894 if (f2fs_has_xattr_block(ofs)) 895 F2FS_I(dn->inode)->i_xattr_nid = dn->nid; 896 897 dn->node_page = page; 898 if (ipage) 899 update_inode(dn->inode, ipage); 900 else 901 sync_inode_page(dn); 902 if (ofs == 0) 903 inc_valid_inode_count(sbi); 904 905 return page; 906 907 fail: 908 clear_node_page_dirty(page); 909 f2fs_put_page(page, 1); 910 return ERR_PTR(err); 911 } 912 913 /* 914 * Caller should do after getting the following values. 915 * 0: f2fs_put_page(page, 0) 916 * LOCKED_PAGE: f2fs_put_page(page, 1) 917 * error: nothing 918 */ 919 static int read_node_page(struct page *page, int rw) 920 { 921 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); 922 struct node_info ni; 923 924 get_node_info(sbi, page->index, &ni); 925 926 if (unlikely(ni.blk_addr == NULL_ADDR)) { 927 f2fs_put_page(page, 1); 928 return -ENOENT; 929 } 930 931 if (PageUptodate(page)) 932 return LOCKED_PAGE; 933 934 return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw); 935 } 936 937 /* 938 * Readahead a node page 939 */ 940 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) 941 { 942 struct page *apage; 943 int err; 944 945 apage = find_get_page(NODE_MAPPING(sbi), nid); 946 if (apage && PageUptodate(apage)) { 947 f2fs_put_page(apage, 0); 948 return; 949 } 950 f2fs_put_page(apage, 0); 951 952 apage = grab_cache_page(NODE_MAPPING(sbi), nid); 953 if (!apage) 954 return; 955 956 err = read_node_page(apage, READA); 957 if (err == 0) 958 f2fs_put_page(apage, 0); 959 else if (err == LOCKED_PAGE) 960 f2fs_put_page(apage, 1); 961 } 962 963 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) 964 { 965 struct page *page; 966 int err; 967 repeat: 968 page = grab_cache_page(NODE_MAPPING(sbi), nid); 969 if (!page) 970 return ERR_PTR(-ENOMEM); 971 972 err = read_node_page(page, READ_SYNC); 973 if (err < 0) 974 return ERR_PTR(err); 975 else if (err == LOCKED_PAGE) 976 goto got_it; 977 978 lock_page(page); 979 if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) { 980 f2fs_put_page(page, 1); 981 return ERR_PTR(-EIO); 982 } 983 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 984 f2fs_put_page(page, 1); 985 goto repeat; 986 } 987 got_it: 988 return page; 989 } 990 991 /* 992 * Return a locked page for the desired node page. 993 * And, readahead MAX_RA_NODE number of node pages. 994 */ 995 struct page *get_node_page_ra(struct page *parent, int start) 996 { 997 struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb); 998 struct blk_plug plug; 999 struct page *page; 1000 int err, i, end; 1001 nid_t nid; 1002 1003 /* First, try getting the desired direct node. */ 1004 nid = get_nid(parent, start, false); 1005 if (!nid) 1006 return ERR_PTR(-ENOENT); 1007 repeat: 1008 page = grab_cache_page(NODE_MAPPING(sbi), nid); 1009 if (!page) 1010 return ERR_PTR(-ENOMEM); 1011 1012 err = read_node_page(page, READ_SYNC); 1013 if (err < 0) 1014 return ERR_PTR(err); 1015 else if (err == LOCKED_PAGE) 1016 goto page_hit; 1017 1018 blk_start_plug(&plug); 1019 1020 /* Then, try readahead for siblings of the desired node */ 1021 end = start + MAX_RA_NODE; 1022 end = min(end, NIDS_PER_BLOCK); 1023 for (i = start + 1; i < end; i++) { 1024 nid = get_nid(parent, i, false); 1025 if (!nid) 1026 continue; 1027 ra_node_page(sbi, nid); 1028 } 1029 1030 blk_finish_plug(&plug); 1031 1032 lock_page(page); 1033 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1034 f2fs_put_page(page, 1); 1035 goto repeat; 1036 } 1037 page_hit: 1038 if (unlikely(!PageUptodate(page))) { 1039 f2fs_put_page(page, 1); 1040 return ERR_PTR(-EIO); 1041 } 1042 return page; 1043 } 1044 1045 void sync_inode_page(struct dnode_of_data *dn) 1046 { 1047 if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) { 1048 update_inode(dn->inode, dn->node_page); 1049 } else if (dn->inode_page) { 1050 if (!dn->inode_page_locked) 1051 lock_page(dn->inode_page); 1052 update_inode(dn->inode, dn->inode_page); 1053 if (!dn->inode_page_locked) 1054 unlock_page(dn->inode_page); 1055 } else { 1056 update_inode_page(dn->inode); 1057 } 1058 } 1059 1060 int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, 1061 struct writeback_control *wbc) 1062 { 1063 pgoff_t index, end; 1064 struct pagevec pvec; 1065 int step = ino ? 2 : 0; 1066 int nwritten = 0, wrote = 0; 1067 1068 pagevec_init(&pvec, 0); 1069 1070 next_step: 1071 index = 0; 1072 end = LONG_MAX; 1073 1074 while (index <= end) { 1075 int i, nr_pages; 1076 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1077 PAGECACHE_TAG_DIRTY, 1078 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 1079 if (nr_pages == 0) 1080 break; 1081 1082 for (i = 0; i < nr_pages; i++) { 1083 struct page *page = pvec.pages[i]; 1084 1085 /* 1086 * flushing sequence with step: 1087 * 0. indirect nodes 1088 * 1. dentry dnodes 1089 * 2. file dnodes 1090 */ 1091 if (step == 0 && IS_DNODE(page)) 1092 continue; 1093 if (step == 1 && (!IS_DNODE(page) || 1094 is_cold_node(page))) 1095 continue; 1096 if (step == 2 && (!IS_DNODE(page) || 1097 !is_cold_node(page))) 1098 continue; 1099 1100 /* 1101 * If an fsync mode, 1102 * we should not skip writing node pages. 1103 */ 1104 if (ino && ino_of_node(page) == ino) 1105 lock_page(page); 1106 else if (!trylock_page(page)) 1107 continue; 1108 1109 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { 1110 continue_unlock: 1111 unlock_page(page); 1112 continue; 1113 } 1114 if (ino && ino_of_node(page) != ino) 1115 goto continue_unlock; 1116 1117 if (!PageDirty(page)) { 1118 /* someone wrote it for us */ 1119 goto continue_unlock; 1120 } 1121 1122 if (!clear_page_dirty_for_io(page)) 1123 goto continue_unlock; 1124 1125 /* called by fsync() */ 1126 if (ino && IS_DNODE(page)) { 1127 int mark = !is_checkpointed_node(sbi, ino); 1128 set_fsync_mark(page, 1); 1129 if (IS_INODE(page)) 1130 set_dentry_mark(page, mark); 1131 nwritten++; 1132 } else { 1133 set_fsync_mark(page, 0); 1134 set_dentry_mark(page, 0); 1135 } 1136 1137 if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc)) 1138 unlock_page(page); 1139 else 1140 wrote++; 1141 1142 if (--wbc->nr_to_write == 0) 1143 break; 1144 } 1145 pagevec_release(&pvec); 1146 cond_resched(); 1147 1148 if (wbc->nr_to_write == 0) { 1149 step = 2; 1150 break; 1151 } 1152 } 1153 1154 if (step < 2) { 1155 step++; 1156 goto next_step; 1157 } 1158 1159 if (wrote) 1160 f2fs_submit_merged_bio(sbi, NODE, WRITE); 1161 return nwritten; 1162 } 1163 1164 int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino) 1165 { 1166 pgoff_t index = 0, end = LONG_MAX; 1167 struct pagevec pvec; 1168 int ret2 = 0, ret = 0; 1169 1170 pagevec_init(&pvec, 0); 1171 1172 while (index <= end) { 1173 int i, nr_pages; 1174 nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1175 PAGECACHE_TAG_WRITEBACK, 1176 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 1177 if (nr_pages == 0) 1178 break; 1179 1180 for (i = 0; i < nr_pages; i++) { 1181 struct page *page = pvec.pages[i]; 1182 1183 /* until radix tree lookup accepts end_index */ 1184 if (unlikely(page->index > end)) 1185 continue; 1186 1187 if (ino && ino_of_node(page) == ino) { 1188 f2fs_wait_on_page_writeback(page, NODE); 1189 if (TestClearPageError(page)) 1190 ret = -EIO; 1191 } 1192 } 1193 pagevec_release(&pvec); 1194 cond_resched(); 1195 } 1196 1197 if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags))) 1198 ret2 = -ENOSPC; 1199 if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags))) 1200 ret2 = -EIO; 1201 if (!ret) 1202 ret = ret2; 1203 return ret; 1204 } 1205 1206 static int f2fs_write_node_page(struct page *page, 1207 struct writeback_control *wbc) 1208 { 1209 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); 1210 nid_t nid; 1211 block_t new_addr; 1212 struct node_info ni; 1213 struct f2fs_io_info fio = { 1214 .type = NODE, 1215 .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, 1216 }; 1217 1218 trace_f2fs_writepage(page, NODE); 1219 1220 if (unlikely(sbi->por_doing)) 1221 goto redirty_out; 1222 if (unlikely(f2fs_cp_error(sbi))) 1223 goto redirty_out; 1224 1225 f2fs_wait_on_page_writeback(page, NODE); 1226 1227 /* get old block addr of this node page */ 1228 nid = nid_of_node(page); 1229 f2fs_bug_on(page->index != nid); 1230 1231 get_node_info(sbi, nid, &ni); 1232 1233 /* This page is already truncated */ 1234 if (unlikely(ni.blk_addr == NULL_ADDR)) { 1235 dec_page_count(sbi, F2FS_DIRTY_NODES); 1236 unlock_page(page); 1237 return 0; 1238 } 1239 1240 if (wbc->for_reclaim) 1241 goto redirty_out; 1242 1243 down_read(&sbi->node_write); 1244 set_page_writeback(page); 1245 write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr); 1246 set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page)); 1247 dec_page_count(sbi, F2FS_DIRTY_NODES); 1248 up_read(&sbi->node_write); 1249 unlock_page(page); 1250 return 0; 1251 1252 redirty_out: 1253 redirty_page_for_writepage(wbc, page); 1254 return AOP_WRITEPAGE_ACTIVATE; 1255 } 1256 1257 static int f2fs_write_node_pages(struct address_space *mapping, 1258 struct writeback_control *wbc) 1259 { 1260 struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); 1261 long diff; 1262 1263 trace_f2fs_writepages(mapping->host, wbc, NODE); 1264 1265 /* balancing f2fs's metadata in background */ 1266 f2fs_balance_fs_bg(sbi); 1267 1268 /* collect a number of dirty node pages and write together */ 1269 if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE)) 1270 goto skip_write; 1271 1272 diff = nr_pages_to_write(sbi, NODE, wbc); 1273 wbc->sync_mode = WB_SYNC_NONE; 1274 sync_node_pages(sbi, 0, wbc); 1275 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); 1276 return 0; 1277 1278 skip_write: 1279 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); 1280 return 0; 1281 } 1282 1283 static int f2fs_set_node_page_dirty(struct page *page) 1284 { 1285 struct address_space *mapping = page->mapping; 1286 struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); 1287 1288 trace_f2fs_set_page_dirty(page, NODE); 1289 1290 SetPageUptodate(page); 1291 if (!PageDirty(page)) { 1292 __set_page_dirty_nobuffers(page); 1293 inc_page_count(sbi, F2FS_DIRTY_NODES); 1294 SetPagePrivate(page); 1295 return 1; 1296 } 1297 return 0; 1298 } 1299 1300 static void f2fs_invalidate_node_page(struct page *page, unsigned int offset, 1301 unsigned int length) 1302 { 1303 struct inode *inode = page->mapping->host; 1304 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 1305 if (PageDirty(page)) 1306 dec_page_count(sbi, F2FS_DIRTY_NODES); 1307 ClearPagePrivate(page); 1308 } 1309 1310 static int f2fs_release_node_page(struct page *page, gfp_t wait) 1311 { 1312 ClearPagePrivate(page); 1313 return 1; 1314 } 1315 1316 /* 1317 * Structure of the f2fs node operations 1318 */ 1319 const struct address_space_operations f2fs_node_aops = { 1320 .writepage = f2fs_write_node_page, 1321 .writepages = f2fs_write_node_pages, 1322 .set_page_dirty = f2fs_set_node_page_dirty, 1323 .invalidatepage = f2fs_invalidate_node_page, 1324 .releasepage = f2fs_release_node_page, 1325 }; 1326 1327 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, 1328 nid_t n) 1329 { 1330 return radix_tree_lookup(&nm_i->free_nid_root, n); 1331 } 1332 1333 static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i, 1334 struct free_nid *i) 1335 { 1336 list_del(&i->list); 1337 radix_tree_delete(&nm_i->free_nid_root, i->nid); 1338 } 1339 1340 static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) 1341 { 1342 struct f2fs_nm_info *nm_i = NM_I(sbi); 1343 struct free_nid *i; 1344 struct nat_entry *ne; 1345 bool allocated = false; 1346 1347 if (!available_free_memory(sbi, FREE_NIDS)) 1348 return -1; 1349 1350 /* 0 nid should not be used */ 1351 if (unlikely(nid == 0)) 1352 return 0; 1353 1354 if (build) { 1355 /* do not add allocated nids */ 1356 read_lock(&nm_i->nat_tree_lock); 1357 ne = __lookup_nat_cache(nm_i, nid); 1358 if (ne && 1359 (!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR)) 1360 allocated = true; 1361 read_unlock(&nm_i->nat_tree_lock); 1362 if (allocated) 1363 return 0; 1364 } 1365 1366 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); 1367 i->nid = nid; 1368 i->state = NID_NEW; 1369 1370 spin_lock(&nm_i->free_nid_list_lock); 1371 if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) { 1372 spin_unlock(&nm_i->free_nid_list_lock); 1373 kmem_cache_free(free_nid_slab, i); 1374 return 0; 1375 } 1376 list_add_tail(&i->list, &nm_i->free_nid_list); 1377 nm_i->fcnt++; 1378 spin_unlock(&nm_i->free_nid_list_lock); 1379 return 1; 1380 } 1381 1382 static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid) 1383 { 1384 struct free_nid *i; 1385 bool need_free = false; 1386 1387 spin_lock(&nm_i->free_nid_list_lock); 1388 i = __lookup_free_nid_list(nm_i, nid); 1389 if (i && i->state == NID_NEW) { 1390 __del_from_free_nid_list(nm_i, i); 1391 nm_i->fcnt--; 1392 need_free = true; 1393 } 1394 spin_unlock(&nm_i->free_nid_list_lock); 1395 1396 if (need_free) 1397 kmem_cache_free(free_nid_slab, i); 1398 } 1399 1400 static void scan_nat_page(struct f2fs_sb_info *sbi, 1401 struct page *nat_page, nid_t start_nid) 1402 { 1403 struct f2fs_nm_info *nm_i = NM_I(sbi); 1404 struct f2fs_nat_block *nat_blk = page_address(nat_page); 1405 block_t blk_addr; 1406 int i; 1407 1408 i = start_nid % NAT_ENTRY_PER_BLOCK; 1409 1410 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { 1411 1412 if (unlikely(start_nid >= nm_i->max_nid)) 1413 break; 1414 1415 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); 1416 f2fs_bug_on(blk_addr == NEW_ADDR); 1417 if (blk_addr == NULL_ADDR) { 1418 if (add_free_nid(sbi, start_nid, true) < 0) 1419 break; 1420 } 1421 } 1422 } 1423 1424 static void build_free_nids(struct f2fs_sb_info *sbi) 1425 { 1426 struct f2fs_nm_info *nm_i = NM_I(sbi); 1427 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 1428 struct f2fs_summary_block *sum = curseg->sum_blk; 1429 int i = 0; 1430 nid_t nid = nm_i->next_scan_nid; 1431 1432 /* Enough entries */ 1433 if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK) 1434 return; 1435 1436 /* readahead nat pages to be scanned */ 1437 ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT); 1438 1439 while (1) { 1440 struct page *page = get_current_nat_page(sbi, nid); 1441 1442 scan_nat_page(sbi, page, nid); 1443 f2fs_put_page(page, 1); 1444 1445 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); 1446 if (unlikely(nid >= nm_i->max_nid)) 1447 nid = 0; 1448 1449 if (i++ == FREE_NID_PAGES) 1450 break; 1451 } 1452 1453 /* go to the next free nat pages to find free nids abundantly */ 1454 nm_i->next_scan_nid = nid; 1455 1456 /* find free nids from current sum_pages */ 1457 mutex_lock(&curseg->curseg_mutex); 1458 for (i = 0; i < nats_in_cursum(sum); i++) { 1459 block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr); 1460 nid = le32_to_cpu(nid_in_journal(sum, i)); 1461 if (addr == NULL_ADDR) 1462 add_free_nid(sbi, nid, true); 1463 else 1464 remove_free_nid(nm_i, nid); 1465 } 1466 mutex_unlock(&curseg->curseg_mutex); 1467 } 1468 1469 /* 1470 * If this function returns success, caller can obtain a new nid 1471 * from second parameter of this function. 1472 * The returned nid could be used ino as well as nid when inode is created. 1473 */ 1474 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) 1475 { 1476 struct f2fs_nm_info *nm_i = NM_I(sbi); 1477 struct free_nid *i = NULL; 1478 retry: 1479 if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids)) 1480 return false; 1481 1482 spin_lock(&nm_i->free_nid_list_lock); 1483 1484 /* We should not use stale free nids created by build_free_nids */ 1485 if (nm_i->fcnt && !on_build_free_nids(nm_i)) { 1486 f2fs_bug_on(list_empty(&nm_i->free_nid_list)); 1487 list_for_each_entry(i, &nm_i->free_nid_list, list) 1488 if (i->state == NID_NEW) 1489 break; 1490 1491 f2fs_bug_on(i->state != NID_NEW); 1492 *nid = i->nid; 1493 i->state = NID_ALLOC; 1494 nm_i->fcnt--; 1495 spin_unlock(&nm_i->free_nid_list_lock); 1496 return true; 1497 } 1498 spin_unlock(&nm_i->free_nid_list_lock); 1499 1500 /* Let's scan nat pages and its caches to get free nids */ 1501 mutex_lock(&nm_i->build_lock); 1502 build_free_nids(sbi); 1503 mutex_unlock(&nm_i->build_lock); 1504 goto retry; 1505 } 1506 1507 /* 1508 * alloc_nid() should be called prior to this function. 1509 */ 1510 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) 1511 { 1512 struct f2fs_nm_info *nm_i = NM_I(sbi); 1513 struct free_nid *i; 1514 1515 spin_lock(&nm_i->free_nid_list_lock); 1516 i = __lookup_free_nid_list(nm_i, nid); 1517 f2fs_bug_on(!i || i->state != NID_ALLOC); 1518 __del_from_free_nid_list(nm_i, i); 1519 spin_unlock(&nm_i->free_nid_list_lock); 1520 1521 kmem_cache_free(free_nid_slab, i); 1522 } 1523 1524 /* 1525 * alloc_nid() should be called prior to this function. 1526 */ 1527 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) 1528 { 1529 struct f2fs_nm_info *nm_i = NM_I(sbi); 1530 struct free_nid *i; 1531 bool need_free = false; 1532 1533 if (!nid) 1534 return; 1535 1536 spin_lock(&nm_i->free_nid_list_lock); 1537 i = __lookup_free_nid_list(nm_i, nid); 1538 f2fs_bug_on(!i || i->state != NID_ALLOC); 1539 if (!available_free_memory(sbi, FREE_NIDS)) { 1540 __del_from_free_nid_list(nm_i, i); 1541 need_free = true; 1542 } else { 1543 i->state = NID_NEW; 1544 nm_i->fcnt++; 1545 } 1546 spin_unlock(&nm_i->free_nid_list_lock); 1547 1548 if (need_free) 1549 kmem_cache_free(free_nid_slab, i); 1550 } 1551 1552 void recover_inline_xattr(struct inode *inode, struct page *page) 1553 { 1554 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 1555 void *src_addr, *dst_addr; 1556 size_t inline_size; 1557 struct page *ipage; 1558 struct f2fs_inode *ri; 1559 1560 ipage = get_node_page(sbi, inode->i_ino); 1561 f2fs_bug_on(IS_ERR(ipage)); 1562 1563 ri = F2FS_INODE(page); 1564 if (!(ri->i_inline & F2FS_INLINE_XATTR)) { 1565 clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR); 1566 goto update_inode; 1567 } 1568 1569 dst_addr = inline_xattr_addr(ipage); 1570 src_addr = inline_xattr_addr(page); 1571 inline_size = inline_xattr_size(inode); 1572 1573 f2fs_wait_on_page_writeback(ipage, NODE); 1574 memcpy(dst_addr, src_addr, inline_size); 1575 update_inode: 1576 update_inode(inode, ipage); 1577 f2fs_put_page(ipage, 1); 1578 } 1579 1580 void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr) 1581 { 1582 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 1583 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; 1584 nid_t new_xnid = nid_of_node(page); 1585 struct node_info ni; 1586 1587 /* 1: invalidate the previous xattr nid */ 1588 if (!prev_xnid) 1589 goto recover_xnid; 1590 1591 /* Deallocate node address */ 1592 get_node_info(sbi, prev_xnid, &ni); 1593 f2fs_bug_on(ni.blk_addr == NULL_ADDR); 1594 invalidate_blocks(sbi, ni.blk_addr); 1595 dec_valid_node_count(sbi, inode); 1596 set_node_addr(sbi, &ni, NULL_ADDR, false); 1597 1598 recover_xnid: 1599 /* 2: allocate new xattr nid */ 1600 if (unlikely(!inc_valid_node_count(sbi, inode))) 1601 f2fs_bug_on(1); 1602 1603 remove_free_nid(NM_I(sbi), new_xnid); 1604 get_node_info(sbi, new_xnid, &ni); 1605 ni.ino = inode->i_ino; 1606 set_node_addr(sbi, &ni, NEW_ADDR, false); 1607 F2FS_I(inode)->i_xattr_nid = new_xnid; 1608 1609 /* 3: update xattr blkaddr */ 1610 refresh_sit_entry(sbi, NEW_ADDR, blkaddr); 1611 set_node_addr(sbi, &ni, blkaddr, false); 1612 1613 update_inode_page(inode); 1614 } 1615 1616 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) 1617 { 1618 struct f2fs_inode *src, *dst; 1619 nid_t ino = ino_of_node(page); 1620 struct node_info old_ni, new_ni; 1621 struct page *ipage; 1622 1623 get_node_info(sbi, ino, &old_ni); 1624 1625 if (unlikely(old_ni.blk_addr != NULL_ADDR)) 1626 return -EINVAL; 1627 1628 ipage = grab_cache_page(NODE_MAPPING(sbi), ino); 1629 if (!ipage) 1630 return -ENOMEM; 1631 1632 /* Should not use this inode from free nid list */ 1633 remove_free_nid(NM_I(sbi), ino); 1634 1635 SetPageUptodate(ipage); 1636 fill_node_footer(ipage, ino, ino, 0, true); 1637 1638 src = F2FS_INODE(page); 1639 dst = F2FS_INODE(ipage); 1640 1641 memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src); 1642 dst->i_size = 0; 1643 dst->i_blocks = cpu_to_le64(1); 1644 dst->i_links = cpu_to_le32(1); 1645 dst->i_xattr_nid = 0; 1646 dst->i_inline = src->i_inline & F2FS_INLINE_XATTR; 1647 1648 new_ni = old_ni; 1649 new_ni.ino = ino; 1650 1651 if (unlikely(!inc_valid_node_count(sbi, NULL))) 1652 WARN_ON(1); 1653 set_node_addr(sbi, &new_ni, NEW_ADDR, false); 1654 inc_valid_inode_count(sbi); 1655 set_page_dirty(ipage); 1656 f2fs_put_page(ipage, 1); 1657 return 0; 1658 } 1659 1660 /* 1661 * ra_sum_pages() merge contiguous pages into one bio and submit. 1662 * these pre-read pages are allocated in bd_inode's mapping tree. 1663 */ 1664 static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages, 1665 int start, int nrpages) 1666 { 1667 struct inode *inode = sbi->sb->s_bdev->bd_inode; 1668 struct address_space *mapping = inode->i_mapping; 1669 int i, page_idx = start; 1670 struct f2fs_io_info fio = { 1671 .type = META, 1672 .rw = READ_SYNC | REQ_META | REQ_PRIO 1673 }; 1674 1675 for (i = 0; page_idx < start + nrpages; page_idx++, i++) { 1676 /* alloc page in bd_inode for reading node summary info */ 1677 pages[i] = grab_cache_page(mapping, page_idx); 1678 if (!pages[i]) 1679 break; 1680 f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio); 1681 } 1682 1683 f2fs_submit_merged_bio(sbi, META, READ); 1684 return i; 1685 } 1686 1687 int restore_node_summary(struct f2fs_sb_info *sbi, 1688 unsigned int segno, struct f2fs_summary_block *sum) 1689 { 1690 struct f2fs_node *rn; 1691 struct f2fs_summary *sum_entry; 1692 struct inode *inode = sbi->sb->s_bdev->bd_inode; 1693 block_t addr; 1694 int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); 1695 struct page *pages[bio_blocks]; 1696 int i, idx, last_offset, nrpages, err = 0; 1697 1698 /* scan the node segment */ 1699 last_offset = sbi->blocks_per_seg; 1700 addr = START_BLOCK(sbi, segno); 1701 sum_entry = &sum->entries[0]; 1702 1703 for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) { 1704 nrpages = min(last_offset - i, bio_blocks); 1705 1706 /* readahead node pages */ 1707 nrpages = ra_sum_pages(sbi, pages, addr, nrpages); 1708 if (!nrpages) 1709 return -ENOMEM; 1710 1711 for (idx = 0; idx < nrpages; idx++) { 1712 if (err) 1713 goto skip; 1714 1715 lock_page(pages[idx]); 1716 if (unlikely(!PageUptodate(pages[idx]))) { 1717 err = -EIO; 1718 } else { 1719 rn = F2FS_NODE(pages[idx]); 1720 sum_entry->nid = rn->footer.nid; 1721 sum_entry->version = 0; 1722 sum_entry->ofs_in_node = 0; 1723 sum_entry++; 1724 } 1725 unlock_page(pages[idx]); 1726 skip: 1727 page_cache_release(pages[idx]); 1728 } 1729 1730 invalidate_mapping_pages(inode->i_mapping, addr, 1731 addr + nrpages); 1732 } 1733 return err; 1734 } 1735 1736 static struct nat_entry_set *grab_nat_entry_set(void) 1737 { 1738 struct nat_entry_set *nes = 1739 f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC); 1740 1741 nes->entry_cnt = 0; 1742 INIT_LIST_HEAD(&nes->set_list); 1743 INIT_LIST_HEAD(&nes->entry_list); 1744 return nes; 1745 } 1746 1747 static void release_nat_entry_set(struct nat_entry_set *nes, 1748 struct f2fs_nm_info *nm_i) 1749 { 1750 f2fs_bug_on(!list_empty(&nes->entry_list)); 1751 1752 nm_i->dirty_nat_cnt -= nes->entry_cnt; 1753 list_del(&nes->set_list); 1754 kmem_cache_free(nat_entry_set_slab, nes); 1755 } 1756 1757 static void adjust_nat_entry_set(struct nat_entry_set *nes, 1758 struct list_head *head) 1759 { 1760 struct nat_entry_set *next = nes; 1761 1762 if (list_is_last(&nes->set_list, head)) 1763 return; 1764 1765 list_for_each_entry_continue(next, head, set_list) 1766 if (nes->entry_cnt <= next->entry_cnt) 1767 break; 1768 1769 list_move_tail(&nes->set_list, &next->set_list); 1770 } 1771 1772 static void add_nat_entry(struct nat_entry *ne, struct list_head *head) 1773 { 1774 struct nat_entry_set *nes; 1775 nid_t start_nid = START_NID(ne->ni.nid); 1776 1777 list_for_each_entry(nes, head, set_list) { 1778 if (nes->start_nid == start_nid) { 1779 list_move_tail(&ne->list, &nes->entry_list); 1780 nes->entry_cnt++; 1781 adjust_nat_entry_set(nes, head); 1782 return; 1783 } 1784 } 1785 1786 nes = grab_nat_entry_set(); 1787 1788 nes->start_nid = start_nid; 1789 list_move_tail(&ne->list, &nes->entry_list); 1790 nes->entry_cnt++; 1791 list_add(&nes->set_list, head); 1792 } 1793 1794 static void merge_nats_in_set(struct f2fs_sb_info *sbi) 1795 { 1796 struct f2fs_nm_info *nm_i = NM_I(sbi); 1797 struct list_head *dirty_list = &nm_i->dirty_nat_entries; 1798 struct list_head *set_list = &nm_i->nat_entry_set; 1799 struct nat_entry *ne, *tmp; 1800 1801 write_lock(&nm_i->nat_tree_lock); 1802 list_for_each_entry_safe(ne, tmp, dirty_list, list) { 1803 if (nat_get_blkaddr(ne) == NEW_ADDR) 1804 continue; 1805 add_nat_entry(ne, set_list); 1806 nm_i->dirty_nat_cnt++; 1807 } 1808 write_unlock(&nm_i->nat_tree_lock); 1809 } 1810 1811 static bool __has_cursum_space(struct f2fs_summary_block *sum, int size) 1812 { 1813 if (nats_in_cursum(sum) + size <= NAT_JOURNAL_ENTRIES) 1814 return true; 1815 else 1816 return false; 1817 } 1818 1819 static void remove_nats_in_journal(struct f2fs_sb_info *sbi) 1820 { 1821 struct f2fs_nm_info *nm_i = NM_I(sbi); 1822 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 1823 struct f2fs_summary_block *sum = curseg->sum_blk; 1824 int i; 1825 1826 mutex_lock(&curseg->curseg_mutex); 1827 for (i = 0; i < nats_in_cursum(sum); i++) { 1828 struct nat_entry *ne; 1829 struct f2fs_nat_entry raw_ne; 1830 nid_t nid = le32_to_cpu(nid_in_journal(sum, i)); 1831 1832 raw_ne = nat_in_journal(sum, i); 1833 retry: 1834 write_lock(&nm_i->nat_tree_lock); 1835 ne = __lookup_nat_cache(nm_i, nid); 1836 if (ne) 1837 goto found; 1838 1839 ne = grab_nat_entry(nm_i, nid); 1840 if (!ne) { 1841 write_unlock(&nm_i->nat_tree_lock); 1842 goto retry; 1843 } 1844 node_info_from_raw_nat(&ne->ni, &raw_ne); 1845 found: 1846 __set_nat_cache_dirty(nm_i, ne); 1847 write_unlock(&nm_i->nat_tree_lock); 1848 } 1849 update_nats_in_cursum(sum, -i); 1850 mutex_unlock(&curseg->curseg_mutex); 1851 } 1852 1853 /* 1854 * This function is called during the checkpointing process. 1855 */ 1856 void flush_nat_entries(struct f2fs_sb_info *sbi) 1857 { 1858 struct f2fs_nm_info *nm_i = NM_I(sbi); 1859 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 1860 struct f2fs_summary_block *sum = curseg->sum_blk; 1861 struct nat_entry_set *nes, *tmp; 1862 struct list_head *head = &nm_i->nat_entry_set; 1863 bool to_journal = true; 1864 1865 /* merge nat entries of dirty list to nat entry set temporarily */ 1866 merge_nats_in_set(sbi); 1867 1868 /* 1869 * if there are no enough space in journal to store dirty nat 1870 * entries, remove all entries from journal and merge them 1871 * into nat entry set. 1872 */ 1873 if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt)) { 1874 remove_nats_in_journal(sbi); 1875 1876 /* 1877 * merge nat entries of dirty list to nat entry set temporarily 1878 */ 1879 merge_nats_in_set(sbi); 1880 } 1881 1882 if (!nm_i->dirty_nat_cnt) 1883 return; 1884 1885 /* 1886 * there are two steps to flush nat entries: 1887 * #1, flush nat entries to journal in current hot data summary block. 1888 * #2, flush nat entries to nat page. 1889 */ 1890 list_for_each_entry_safe(nes, tmp, head, set_list) { 1891 struct f2fs_nat_block *nat_blk; 1892 struct nat_entry *ne, *cur; 1893 struct page *page; 1894 nid_t start_nid = nes->start_nid; 1895 1896 if (to_journal && !__has_cursum_space(sum, nes->entry_cnt)) 1897 to_journal = false; 1898 1899 if (to_journal) { 1900 mutex_lock(&curseg->curseg_mutex); 1901 } else { 1902 page = get_next_nat_page(sbi, start_nid); 1903 nat_blk = page_address(page); 1904 f2fs_bug_on(!nat_blk); 1905 } 1906 1907 /* flush dirty nats in nat entry set */ 1908 list_for_each_entry_safe(ne, cur, &nes->entry_list, list) { 1909 struct f2fs_nat_entry *raw_ne; 1910 nid_t nid = nat_get_nid(ne); 1911 int offset; 1912 1913 if (to_journal) { 1914 offset = lookup_journal_in_cursum(sum, 1915 NAT_JOURNAL, nid, 1); 1916 f2fs_bug_on(offset < 0); 1917 raw_ne = &nat_in_journal(sum, offset); 1918 nid_in_journal(sum, offset) = cpu_to_le32(nid); 1919 } else { 1920 raw_ne = &nat_blk->entries[nid - start_nid]; 1921 } 1922 raw_nat_from_node_info(raw_ne, &ne->ni); 1923 1924 if (nat_get_blkaddr(ne) == NULL_ADDR && 1925 add_free_nid(sbi, nid, false) <= 0) { 1926 write_lock(&nm_i->nat_tree_lock); 1927 __del_from_nat_cache(nm_i, ne); 1928 write_unlock(&nm_i->nat_tree_lock); 1929 } else { 1930 write_lock(&nm_i->nat_tree_lock); 1931 __clear_nat_cache_dirty(nm_i, ne); 1932 write_unlock(&nm_i->nat_tree_lock); 1933 } 1934 } 1935 1936 if (to_journal) 1937 mutex_unlock(&curseg->curseg_mutex); 1938 else 1939 f2fs_put_page(page, 1); 1940 1941 release_nat_entry_set(nes, nm_i); 1942 } 1943 1944 f2fs_bug_on(!list_empty(head)); 1945 f2fs_bug_on(nm_i->dirty_nat_cnt); 1946 } 1947 1948 static int init_node_manager(struct f2fs_sb_info *sbi) 1949 { 1950 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); 1951 struct f2fs_nm_info *nm_i = NM_I(sbi); 1952 unsigned char *version_bitmap; 1953 unsigned int nat_segs, nat_blocks; 1954 1955 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); 1956 1957 /* segment_count_nat includes pair segment so divide to 2. */ 1958 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; 1959 nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); 1960 1961 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks; 1962 1963 /* not used nids: 0, node, meta, (and root counted as valid node) */ 1964 nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM; 1965 nm_i->fcnt = 0; 1966 nm_i->nat_cnt = 0; 1967 nm_i->ram_thresh = DEF_RAM_THRESHOLD; 1968 1969 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); 1970 INIT_LIST_HEAD(&nm_i->free_nid_list); 1971 INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC); 1972 INIT_LIST_HEAD(&nm_i->nat_entries); 1973 INIT_LIST_HEAD(&nm_i->dirty_nat_entries); 1974 INIT_LIST_HEAD(&nm_i->nat_entry_set); 1975 1976 mutex_init(&nm_i->build_lock); 1977 spin_lock_init(&nm_i->free_nid_list_lock); 1978 rwlock_init(&nm_i->nat_tree_lock); 1979 1980 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); 1981 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); 1982 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); 1983 if (!version_bitmap) 1984 return -EFAULT; 1985 1986 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, 1987 GFP_KERNEL); 1988 if (!nm_i->nat_bitmap) 1989 return -ENOMEM; 1990 return 0; 1991 } 1992 1993 int build_node_manager(struct f2fs_sb_info *sbi) 1994 { 1995 int err; 1996 1997 sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL); 1998 if (!sbi->nm_info) 1999 return -ENOMEM; 2000 2001 err = init_node_manager(sbi); 2002 if (err) 2003 return err; 2004 2005 build_free_nids(sbi); 2006 return 0; 2007 } 2008 2009 void destroy_node_manager(struct f2fs_sb_info *sbi) 2010 { 2011 struct f2fs_nm_info *nm_i = NM_I(sbi); 2012 struct free_nid *i, *next_i; 2013 struct nat_entry *natvec[NATVEC_SIZE]; 2014 nid_t nid = 0; 2015 unsigned int found; 2016 2017 if (!nm_i) 2018 return; 2019 2020 /* destroy free nid list */ 2021 spin_lock(&nm_i->free_nid_list_lock); 2022 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { 2023 f2fs_bug_on(i->state == NID_ALLOC); 2024 __del_from_free_nid_list(nm_i, i); 2025 nm_i->fcnt--; 2026 spin_unlock(&nm_i->free_nid_list_lock); 2027 kmem_cache_free(free_nid_slab, i); 2028 spin_lock(&nm_i->free_nid_list_lock); 2029 } 2030 f2fs_bug_on(nm_i->fcnt); 2031 spin_unlock(&nm_i->free_nid_list_lock); 2032 2033 /* destroy nat cache */ 2034 write_lock(&nm_i->nat_tree_lock); 2035 while ((found = __gang_lookup_nat_cache(nm_i, 2036 nid, NATVEC_SIZE, natvec))) { 2037 unsigned idx; 2038 nid = nat_get_nid(natvec[found - 1]) + 1; 2039 for (idx = 0; idx < found; idx++) 2040 __del_from_nat_cache(nm_i, natvec[idx]); 2041 } 2042 f2fs_bug_on(nm_i->nat_cnt); 2043 write_unlock(&nm_i->nat_tree_lock); 2044 2045 kfree(nm_i->nat_bitmap); 2046 sbi->nm_info = NULL; 2047 kfree(nm_i); 2048 } 2049 2050 int __init create_node_manager_caches(void) 2051 { 2052 nat_entry_slab = f2fs_kmem_cache_create("nat_entry", 2053 sizeof(struct nat_entry)); 2054 if (!nat_entry_slab) 2055 goto fail; 2056 2057 free_nid_slab = f2fs_kmem_cache_create("free_nid", 2058 sizeof(struct free_nid)); 2059 if (!free_nid_slab) 2060 goto destory_nat_entry; 2061 2062 nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set", 2063 sizeof(struct nat_entry_set)); 2064 if (!nat_entry_set_slab) 2065 goto destory_free_nid; 2066 return 0; 2067 2068 destory_free_nid: 2069 kmem_cache_destroy(free_nid_slab); 2070 destory_nat_entry: 2071 kmem_cache_destroy(nat_entry_slab); 2072 fail: 2073 return -ENOMEM; 2074 } 2075 2076 void destroy_node_manager_caches(void) 2077 { 2078 kmem_cache_destroy(nat_entry_set_slab); 2079 kmem_cache_destroy(free_nid_slab); 2080 kmem_cache_destroy(nat_entry_slab); 2081 } 2082