/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/* give 25%, 25%, 50%, 50% of memory to each component respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}
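
/*
 * Worked example of the thresholds above (illustrative only; ram_thresh
 * defaults to DEF_RAM_THRESHOLD, set in init_node_manager() below): if
 * ram_thresh is 10, the overall budget is 10% of low memory, so FREE_NIDS
 * and NAT_ENTRIES may each grow to (10% >> 2) == 2.5% of avail_ram, while
 * DIRTY_DENTS and INO_ENTRIES may each reach (10% >> 1) == 5% before this
 * function reports memory pressure.
 */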

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool has_fsynced_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsynced = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_FSYNCED_INODE))
		fsynced = true;
	up_read(&nm_i->nat_tree_lock);
	return fsynced;
}

bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}
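
/*
 * Informal sketch of the nat_entry state machine enforced by the
 * f2fs_bug_on() checks in set_node_addr() below: a nid starts out
 * unallocated (NULL_ADDR), moves to NEW_ADDR once a node page has been
 * created but not yet written back, then to a real block address when
 * the page reaches disk, and finally back to NULL_ADDR on truncation,
 * which also bumps the version number. Transitions that skip a state
 * trip the sanity checks.
 */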

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	}
	up_write(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		e->ni = *ni;
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	down_write(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	up_read(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * offset[0] holds the offset within the raw inode.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
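
/*
 * Worked example for get_node_path() (values are illustrative): block
 * ADDRS_PER_INODE(fi) + 3 is the 4th block past the inode's direct
 * pointers, so the function returns level 1 with
 * offset[0] == NODE_DIR1_BLOCK, offset[1] == 3 and noffset[1] == 1,
 * i.e. the 4th slot of the first direct node. The first block beyond
 * both direct and indirect ranges resolves through NODE_DIND_BLOCK at
 * level 3.
 */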

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() unless ro is set to RDONLY_NODE; in the RDONLY_NODE
 * case, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
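
/*
 * Typical caller pattern for get_dnode_of_data() (a sketch; concrete
 * users elsewhere in f2fs follow this shape):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
 *	if (err)
 *		return err;
 *	... use dn.node_page / dn.data_blkaddr ...
 *	f2fs_put_dnode(&dn);
 */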

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
		return;

	if (truncate_xattr_node(inode, dn.inode_page)) {
		f2fs_put_dnode(&dn);
		return;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Callers must release the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing to release
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	return page;
}
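
/*
 * Note on lock discipline (see truncate_partial_nodes() for a real
 * user): get_node_page() returns with the page locked and a reference
 * held, so callers are expected to drop both with
 * f2fs_put_page(page, 1) when done.
 */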

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (!is_checkpointed_node(sbi, ino) &&
						!has_fsynced_inode(sbi, ino))
						set_dentry_mark(page, 1);
					else
						set_dentry_mark(page, 0);
				}
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
						unsigned int length)
{
	struct inode *inode = page->mapping->host;
	if (PageDirty(page))
		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
								nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
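
/*
 * Free nid life cycle (as implemented below): add_free_nid() inserts
 * candidates in the NID_NEW state; alloc_nid() hands one out and marks
 * it NID_ALLOC; alloc_nid_done() releases the entry once the allocation
 * is committed, while alloc_nid_failed() either drops it or returns it
 * to NID_NEW, depending on available_free_memory().
 */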

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		down_read(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		up_read(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr =
			le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}
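
/*
 * Usage sketch for the nid allocation API; this mirrors the pattern
 * get_dnode_of_data() uses when allocating new node pages:
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);
 *	else
 *		alloc_nid_done(sbi, nid);
 */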

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are allocated in bd_inode's mapping tree.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
				int start, int nrpages)
{
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	struct address_space *mapping = inode->i_mapping;
	int i, page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
		/* alloc page in bd_inode for reading node summary info */
		pages[i] = grab_cache_page(mapping, page_idx);
		if (!pages[i])
			break;
		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return i;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	struct page *pages[bio_blocks];
	int i, idx, last_offset, nrpages, err = 0;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		for (idx = 0; idx < nrpages; idx++) {
			if (err)
				goto skip;

			lock_page(pages[idx]);
			if (unlikely(!PageUptodate(pages[idx]))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(pages[idx]);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(pages[idx]);
skip:
			page_cache_release(pages[idx]);
		}

		invalidate_mapping_pages(inode->i_mapping, addr,
							addr + nrpages);
	}
	return err;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);

		down_write(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = grab_nat_entry(nm_i, nid);
			node_info_from_raw_nat(&ne->ni, &raw_ne);
		}
		__set_nat_cache_dirty(nm_i, ne);
		up_write(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		mutex_lock(&curseg->curseg_mutex);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(sum, offset);
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);

		down_write(&NM_I(sbi)->nat_tree_lock);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		up_write(&NM_I(sbi)->nat_tree_lock);

		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		mutex_unlock(&curseg->curseg_mutex);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	kmem_cache_free(nat_entry_set_slab, set);
}
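
/*
 * Note: __adjust_nat_entry_set() above keeps the dirty set list sorted
 * by ascending entry_cnt, so sets small enough to fit in the remaining
 * journal space are flushed first through the journal, and only the
 * larger sets fall back to full NAT page writes.
 */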

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *setvec[NATVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;
	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NATVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(sum));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* unused nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}